content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Yige Wu @WashU Jun 2021
# Purpose: classify snATAC peaks as promoter vs. enhancer peaks (an "enhancer"
# here is a non-promoter peak co-accessible with a promoter peak), assign each
# peak a gene symbol, attach Entrez IDs via biomaRt, and write two TSV tables.
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
## project helpers -- presumably these supply fread()/%>%/str_split_fixed(),
## makeOutDir() and mapvalues() used below; confirm in load_pkgs.R/functions.R
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
library(clusterProfiler)
library(biomaRt)
library(org.Hs.eg.db)
## set run id (date-stamped so repeated runs write into separate folders)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input peak fold changes
peaks_anno_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Peak_Annotation/All_peaks_annotated_26snATAC_merged_obj.20210607.tsv")
## input coaccessiblity results
coaccess_peak2genes_df <- fread(data.table = F, input = "./Resources/Analysis_Results/snatac/da_peaks/annotate_peaks/annotate_coaccessible_peaks/20210615.v1/Coaccessible_Peaks.Annotated.20210615.v1.tsv")
## get biomart (requires live network access to Ensembl)
## NOTE(review): `datasets` and `filters` are fetched but never used below.
ensembl <- useMart("ensembl")
datasets <- listDatasets(ensembl)
ensembl = useDataset("hsapiens_gene_ensembl",mart=ensembl)
filters = listFilters(ensembl)
# annotate and filter peaks ------------------------------------------------------------
## annotate peaks: keep only the broad annotation class, e.g.
## "Promoter (<=1kb)" -> "Promoter", plus the annotated gene symbol
peaks_anno_df <- peaks_anno_df %>%
mutate(peak2gene_type = str_split_fixed(string = annotation, pattern = " \\(", n = 2)[,1]) %>%
rename(Gene = SYMBOL) %>%
select(peak, peak2gene_type, Gene)
# peaks_anno_df %>%
# filter(peak2gene_type == "Promoter") %>%
# nrow()
#
# peaks_anno_df %>%
# filter(peak2gene_type != "Promoter") %>%
# nrow()
# annotate with coaccessiblity results ------------------------------------
## inner merge (default all = FALSE): peaks with no co-accessible partner drop out
peaks_anno_wcoaccess_df <- merge(x = peaks_anno_df,
y = coaccess_peak2genes_df %>%
select(Peak1, Peak2, peak2gene_type.2, genesymbol.2, coaccess) %>%
rename(peak.coaccess = Peak2) %>%
rename(peak2gene_type.coaccess = peak2gene_type.2) %>%
rename(genesymbol.coaccess = genesymbol.2) %>%
rename(coaccess_score = coaccess),
by.x = c("peak"), by.y = c("Peak1"))
## enhancer candidates: non-promoter peaks whose co-accessible partner is a promoter
peak2gene_enhancers_df <- peaks_anno_wcoaccess_df %>%
filter(peak2gene_type != "Promoter" & peak2gene_type.coaccess == "Promoter")
## console-only sanity check: number of distinct enhancer peaks
peak2gene_enhancers_df %>%
select(peak) %>%
unique() %>%
nrow()
## combined peak->gene table: enhancers take the partner promoter's gene symbol;
## promoter peaks keep their own gene, with NA in the co-accessibility columns
peak2gene_enh_pro_df <- rbind(peak2gene_enhancers_df %>%
mutate(peak2gene_type = "Enhancer") %>%
mutate(Gene = genesymbol.coaccess),
peaks_anno_df %>%
filter(peak2gene_type == "Promoter") %>%
mutate(peak.coaccess = NA) %>%
mutate(peak2gene_type.coaccess = NA) %>%
mutate(genesymbol.coaccess = NA) %>%
mutate(coaccess_score = NA))
# annotate with entrez ids ------------------------------------------------
genes2convert <- unique(c(peaks_anno_df$Gene, peak2gene_enh_pro_df$Gene))
## retrieve entrezgene_id
genesymbol2entrezid_df <- getBM(attributes=c('entrezgene_id', 'hgnc_symbol'),
filters = 'hgnc_symbol',
values = genes2convert,
mart = ensembl)
## add entrez ids to the deg table
## NOTE(review): mapvalues() leaves unmatched symbols unchanged, so
## entrezgene_id holds the gene SYMBOL for genes biomaRt did not map -- confirm
## downstream consumers expect that.
peaks_anno_df$entrezgene_id <- mapvalues(x = peaks_anno_df$Gene, from = genesymbol2entrezid_df$hgnc_symbol, to = as.vector(genesymbol2entrezid_df$entrezgene_id))
peak2gene_enh_pro_df$entrezgene_id <- mapvalues(x = peak2gene_enh_pro_df$Gene, from = genesymbol2entrezid_df$hgnc_symbol, to = as.vector(genesymbol2entrezid_df$entrezgene_id))
# write output ------------------------------------------------------------
file2write <- paste0(dir_out, "ccRCC_vs_PT_DAPs.Annotated.", run_id, ".tsv")
write.table(file = file2write, x = peaks_anno_df, quote = F, sep = "\t", row.names = F)
file2write <- paste0(dir_out, "ccRCC_vs_PT_DAP2Gene.EnhancerPromoter.", run_id, ".tsv")
write.table(file = file2write, x = peak2gene_enh_pro_df, quote = F, sep = "\t", row.names = F)
| /snatac/da_peaks/annotate_peaks/annotate_all_peaks_promoters_enhancers.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 4,328 | r | # Yige Wu @WashU Jun 2021
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
library(clusterProfiler)
library(biomaRt)
library(org.Hs.eg.db)
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input peak fold changes
peaks_anno_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Peak_Annotation/All_peaks_annotated_26snATAC_merged_obj.20210607.tsv")
## input coaccessiblity results
coaccess_peak2genes_df <- fread(data.table = F, input = "./Resources/Analysis_Results/snatac/da_peaks/annotate_peaks/annotate_coaccessible_peaks/20210615.v1/Coaccessible_Peaks.Annotated.20210615.v1.tsv")
## get biomart
ensembl <- useMart("ensembl")
datasets <- listDatasets(ensembl)
ensembl = useDataset("hsapiens_gene_ensembl",mart=ensembl)
filters = listFilters(ensembl)
# annotate and filter peaks ------------------------------------------------------------
## annotate peaks
peaks_anno_df <- peaks_anno_df %>%
mutate(peak2gene_type = str_split_fixed(string = annotation, pattern = " \\(", n = 2)[,1]) %>%
rename(Gene = SYMBOL) %>%
select(peak, peak2gene_type, Gene)
# peaks_anno_df %>%
# filter(peak2gene_type == "Promoter") %>%
# nrow()
#
# peaks_anno_df %>%
# filter(peak2gene_type != "Promoter") %>%
# nrow()
# annotate with coaccessiblity results ------------------------------------
peaks_anno_wcoaccess_df <- merge(x = peaks_anno_df,
y = coaccess_peak2genes_df %>%
select(Peak1, Peak2, peak2gene_type.2, genesymbol.2, coaccess) %>%
rename(peak.coaccess = Peak2) %>%
rename(peak2gene_type.coaccess = peak2gene_type.2) %>%
rename(genesymbol.coaccess = genesymbol.2) %>%
rename(coaccess_score = coaccess),
by.x = c("peak"), by.y = c("Peak1"))
peak2gene_enhancers_df <- peaks_anno_wcoaccess_df %>%
filter(peak2gene_type != "Promoter" & peak2gene_type.coaccess == "Promoter")
peak2gene_enhancers_df %>%
select(peak) %>%
unique() %>%
nrow()
peak2gene_enh_pro_df <- rbind(peak2gene_enhancers_df %>%
mutate(peak2gene_type = "Enhancer") %>%
mutate(Gene = genesymbol.coaccess),
peaks_anno_df %>%
filter(peak2gene_type == "Promoter") %>%
mutate(peak.coaccess = NA) %>%
mutate(peak2gene_type.coaccess = NA) %>%
mutate(genesymbol.coaccess = NA) %>%
mutate(coaccess_score = NA))
# annotate with entrez ids ------------------------------------------------
genes2convert <- unique(c(peaks_anno_df$Gene, peak2gene_enh_pro_df$Gene))
## retrieve entrezgene_id
genesymbol2entrezid_df <- getBM(attributes=c('entrezgene_id', 'hgnc_symbol'),
filters = 'hgnc_symbol',
values = genes2convert,
mart = ensembl)
## add entrez ids to the deg table
peaks_anno_df$entrezgene_id <- mapvalues(x = peaks_anno_df$Gene, from = genesymbol2entrezid_df$hgnc_symbol, to = as.vector(genesymbol2entrezid_df$entrezgene_id))
peak2gene_enh_pro_df$entrezgene_id <- mapvalues(x = peak2gene_enh_pro_df$Gene, from = genesymbol2entrezid_df$hgnc_symbol, to = as.vector(genesymbol2entrezid_df$entrezgene_id))
# write outupt ------------------------------------------------------------
file2write <- paste0(dir_out, "ccRCC_vs_PT_DAPs.Annotated.", run_id, ".tsv")
write.table(file = file2write, x = peaks_anno_df, quote = F, sep = "\t", row.names = F)
file2write <- paste0(dir_out, "ccRCC_vs_PT_DAP2Gene.EnhancerPromoter.", run_id, ".tsv")
write.table(file = file2write, x = peak2gene_enh_pro_df, quote = F, sep = "\t", row.names = F)
|
# Display one value on the console.
#
# @param str1 Value to show.
# @return The printed value, with the same (invisible) visibility as print().
view1 <- function(str1) print(str1)
# Print the input vector once per element (length(num1) repetitions).
# NOTE(review): the loop variable `i` is never used, so `print(num1)` emits the
# WHOLE vector on every iteration. If the intent was to print each element in
# turn, the body should likely be `print(i)` -- confirm before changing.
view2<-function(num1){
for(i in num1)
print(num1)
}
# Demo: prints 1:5 five times (whole vector each pass), then prints "happy".
view2(c(1:5))
str1<-"happy"
view1(str1)
#-------------------------------
# 문제 1 (Problem 1)
# Sum two score components.
#
# @param a First score (e.g. TOEIC points).
# @param b Second score (e.g. IT-certification points).
# @return The combined score a + b.
total <- function(a, b) {
  a + b
}
# Classify a combined entrance score as pass/fail.
#
# @param sum Combined score (note: shadows base::sum inside this function).
# @return "합격" (pass) when sum >= 800, otherwise "불합격" (fail).
rs <- function(sum) {
  if (sum >= 800) "합격" else "불합격"
}
# Example inputs: TOEIC score and IT-certification score.
toeic<-750
it<-65
# Report the combined entrance score ("입사총점") and pass/fail result ("입사결과").
cat("입사총점: ",total(toeic,it),"\n")
cat("입사결과: ",rs(total(toeic,it)),"\n")
| /r_lec/step01_basic/step05_function/d_func4.R | no_license | woojae05/Programing-R | R | false | false | 431 | r | view1<- function(str1){
print(str1)
}
view2<-function(num1){
for(i in num1)
print(num1)
}
view2(c(1:5))
str1<-"happy"
view1(str1)
#-------------------------------
# 문제 1
total<- function(a,b){
return(a+b)
}
rs<- function(sum){
if(sum>=800)
return("합격")
else
return("불합격")
}
toeic<-750
it<-65
cat("입사총점: ",total(toeic,it),"\n")
cat("입사결과: ",rs(total(toeic,it)),"\n")
|
#' Fits an ellipse model to data from getPhotoreceptorCoordinates
#'
#' Fits `ellipseFunction()` to the radial distance of each (x, y) coordinate
#' pair via Levenberg-Marquardt nonlinear least squares (`minpack.lm::nlsLM`).
#'
#' @param photoreceptorCoordinates Data frame holding the coordinate columns.
#' @param x,y Names of the columns with the two photoreceptor axes.
#' @param a1,a2,alpha Starting values for the ellipse semi-axes and rotation.
#' @return A list with the input coordinates (`xvalues`, `yvalues`), the
#'   column names used (`coords`), the fitted parameter table (`values`), the
#'   residual sum of squares (`rsquared`), slopes for a 0-360 degree sweep
#'   (`k`), and a one-row `results` data frame of estimates, standard errors,
#'   t-values and p-values.
#' @examples
#' \dontrun{
#' getPhotoreceptorCoordinates(path, ConeFundamentals10) %>%
#'   filter(patid == 1, seye == "OD", frequency == 2) %>%
#'   fitEllipse() %>%
#'   plotEllipse()
#' }
#' @export
fitEllipse <- function(photoreceptorCoordinates,
                       x = "lcone",
                       y = "mcone",
                       a1 = 1,
                       a2 = 1,
                       alpha = 1)
{
  xvalues <- photoreceptorCoordinates[, x]
  yvalues <- photoreceptorCoordinates[, y]
  ## slope of each point's direction from the origin, and its radial distance
  k <- yvalues / xvalues
  xtimesy <- sqrt(xvalues ^ 2 + yvalues ^ 2)
  model <- minpack.lm::nlsLM(
    xtimesy ~ ellipseFunction(k, a1, a2, alpha),
    start = list(a1 = a1,
                 a2 = a2,
                 alpha = alpha),
    control = list(maxiter = 800, ftol = 0.001836)
  )
  ## NOTE(review): the original also computed predicted (x, y) points and
  ## their mirror images here but never returned them; that dead code was
  ## removed. Plotting presumably reconstructs the curve from the parameters.
  hab <- data.frame(summary(model)$parameters)
  ## slopes (tan of the angle) for a full 0-360 degree sweep, exposed as $k
  ang <- seq(0, 360, 1)
  k <- sin(ang * pi / 180) / cos(ang * pi / 180)
  output <- list()
  output$coords <- c(x, y)
  output$xvalues <- xvalues
  output$yvalues <- yvalues
  output$values <- data.frame(Names = c("a1", "a2", "alpha"),
                              Values = hab)
  output$rsquared <- sum(residuals(model) ^ 2)
  output$k <- k
  output$results <- data.frame(
    Rsquared = sum(residuals(model) ^ 2),
    a1Estimate = hab[1, 1],
    a1StdError = hab[1, 2],
    a1tvalue = hab[1, 3],
    a1Pr = hab[1, 4],
    a2Estimate = hab[2, 1],
    a2StdError = hab[2, 2],
    a2tvalue = hab[2, 3],
    a2Pr = hab[2, 4],
    alphaEstimate = hab[3, 1],
    alphaStdError = hab[3, 2],
    alphatvalue = hab[3, 3],
    alphaPr = hab[3, 4]
  )
  output
}
| /R/fitEllipse.R | no_license | julienfars/flickerbox | R | false | false | 1,948 | r | #' Fits an ellipse model to data from getPhotoreceptorCoordinates
#'
#' @examples
#' \dontrun{
#' getPhotoreceptorCoordinates(path, ConeFundamentals10) %>%
#' filter(patid == 1, seye == "OD", frequency == 2) %>%
#' fitEllipse() %>%
#' plotEllipse()
#' }
#' @export
fitEllipse <- function(photoreceptorCoordinates,
x = "lcone",
y = "mcone",
a1 = 1,
a2 = 1,
alpha = 1)
{
xvalues <- photoreceptorCoordinates[, x]
yvalues <- photoreceptorCoordinates[, y]
k <- yvalues / xvalues
xtimesy <- sqrt(xvalues ^ 2 + yvalues ^ 2)
model <- minpack.lm::nlsLM(
xtimesy ~ ellipseFunction(k, a1, a2, alpha),
start = list(a1 = a1,
a2 = a2,
alpha = alpha),
control = list(maxiter = 800, ftol = 0.001836)
)
yvaluesPredict <- predict(model)
xvaluesPredict <- ifelse(k == Inf, 0, yvaluesPredict / k)
xneu <- c(xvaluesPredict,-xvaluesPredict)
yneu <- c(yvaluesPredict,-yvaluesPredict)
tabe <- data.frame(xneu, yneu)
hab <- data.frame(summary(model)$parameters)
b1 <- hab[1, 1]
b2 <- hab[2, 1]
beta <- hab[3, 1]
ang <- seq(0, 360, 1)
k <- sin(ang * pi / 180) / cos(ang * pi / 180)
output <- list()
output$coords <- c(x, y)
output$xvalues <- xvalues
output$yvalues <- yvalues
output$values <- data.frame(Names = c("a1", "a2", "alpha"),
Values = hab)
output$rsquared <- sum(residuals(model) ^ 2)
output$k <- k
output$results <- data.frame(
Rsquared = sum(residuals(model) ^ 2),
a1Estimate = hab[1, 1],
a1StdError = hab[1, 2],
a1tvalue = hab[1, 3],
a1Pr = hab[1, 4],
a2Estimate = hab[2, 1],
a2StdError = hab[2, 2],
a2tvalue = hab[2, 3],
a2Pr = hab[2, 4],
alphaEstimate = hab[3, 1],
alphaStdError = hab[3, 2],
alphatvalue = hab[3, 3],
alphaPr = hab[3, 4]
)
return(output)
}
|
# Standard testthat entry point: attach the package under test and run every
# test file under tests/testthat/ for the rjumanpp package.
library(testthat)
library(rjumanpp)
test_check("rjumanpp")
| /tests/testthat.R | permissive | ymattu/rjumanpp | R | false | false | 60 | r | library(testthat)
library(rjumanpp)
test_check("rjumanpp")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/P03.R
\docType{data}
\name{P03}
\alias{P03}
\title{Raw precipitation Gamorel}
\format{
A data.frame with 2 columns:
\describe{
\item{date}{dates in POSIXct format}
\item{precipitation}{Precipitation depth (mm)}
}
}
\source{
\url{http://agua.unorte.edu.uy/}
}
\usage{
P03
}
\description{
Raw precipitation Gamorel
}
\keyword{datasets}
| /man/P03.Rd | no_license | rafaelnavas/SanAntonioData | R | false | true | 416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/P03.R
\docType{data}
\name{P03}
\alias{P03}
\title{Raw precipitation Gamorel}
\format{
A data.frame with 2 columns:
\describe{
\item{date}{dates in POSIXct format}
\item{precipitation}{Precipitation depth (mm)}
}
}
\source{
\url{http://agua.unorte.edu.uy/}
}
\usage{
P03
}
\description{
Raw precipitation Gamorel
}
\keyword{datasets}
|
context("test-type-vctr")
# new_vctr() must append "vctrs_vctr" after the user-supplied class and pass
# extra attributes (here `x = 1`) through unchanged.
test_that("constructor sets attributes", {
  x <- new_vctr(1:4, class = "x", x = 1)
  expect_equal(x, structure(1:4, class = c("x", "vctrs_vctr"), x = 1))
})
test_that(".data must be a vector", {
expect_error(new_vctr(mean), "vector type")
})
test_that("attributes other than names are ignored", {
out <- new_vctr(structure(1, a = 1))
expect_null(attributes(out)$a)
})
test_that("default format method is internal", {
x <- new_vctr(1, class = "x")
expect_equal(format(x), format(x))
})
test_that("vctr class is proxied", {
expect_identical(vec_proxy(new_vctr(1:3)), new_vctr(1:3))
expect_identical(vec_proxy(new_vctr(as.list(1:3))), unclass(new_vctr(as.list(1:3))))
expect_true(vec_is(new_vctr(as.list(1:3))))
})
test_that("attributes must be named", {
expect_error(vec_set_attributes(1, list(1)), "must be named")
expect_error(vec_set_attributes(1, list(y = 1, 2)), "2 does not")
})
test_that("can strip all attributes without adding new ones", {
expect_equal(vec_set_attributes(structure(1, a = 1), NULL), 1)
})
# Cast/restore ------------------------------------------------------------
test_that("cast to NULL returns x", {
x <- new_vctr(1, class = "x")
expect_equal(vec_cast(NULL, x), NULL)
})
test_that("cast succeeds if attributes equal", {
x1 <- new_vctr(1, class = "x", a = 1, b = 2)
x2 <- new_vctr(2, class = "x", a = 1, b = 2)
expect_equal(vec_cast(x1, x2), x1)
expect_equal(vec_cast(x2, x1), x2)
})
test_that("and fails if attributes are different", {
x1 <- new_vctr(1, class = "x", a = 1, b = 2)
x2 <- new_vctr(2, class = "x", a = 2, b = 2)
expect_error(vec_cast(x1, x2), class = "vctrs_error_incompatible_cast")
})
test_that("restoring to atomic vector of same type preserves attributes", {
x1 <- new_vctr(1, class = "x")
x2 <- new_vctr(2, class = "x")
expect_equal(vec_restore(2, x1), x2)
})
test_that("restoring to atomic vector of different type throws error", {
x1 <- new_vctr(1, class = "x")
expect_error(vec_restore("x", x1), class = "vctrs_error_incompatible_cast")
})
test_that("base coercion methods mapped to vec_cast", {
x <- new_vctr(1)
expect_error(as.logical(x), class = "vctrs_error_incompatible_cast")
expect_error(as.integer(x), class = "vctrs_error_incompatible_cast")
expect_error(as.logical(x), class = "vctrs_error_incompatible_cast")
expect_error(as.double(x), class = "vctrs_error_incompatible_cast")
expect_error(as.character(x), class = "vctrs_error_incompatible_cast")
expect_error(as.Date(x), class = "vctrs_error_incompatible_cast")
expect_error(as.POSIXct(x), class = "vctrs_error_incompatible_cast")
expect_equal(as.list(x), list(x))
})
test_that("as.data.frame creates data frame", {
x <- new_vctr(1:3)
df <- as.data.frame(x)
expect_s3_class(df, "data.frame")
expect_equal(nrow(df), 3)
expect_named(df, "x")
})
# equality + comparison + arith + math ---------------------------------------
test_that("equality functions remapped", {
x <- new_vctr(c(1, 1, NA))
expect_error(x == 1, class = "vctrs_error_incompatible_type")
expect_error(x != 1, class = "vctrs_error_incompatible_type")
expect_equal(is.na(x), c(FALSE, FALSE, TRUE))
expect_true(anyNA(x))
expect_equal(unique(x), new_vctr(c(1, NA)))
expect_equal(duplicated(x), c(FALSE, TRUE, FALSE))
expect_true(anyDuplicated(x))
})
test_that("is.na<-() supported", {
x <- new_vctr(1:4)
is.na(x) <- c(FALSE, FALSE, TRUE, NA)
expect_identical(x, new_vctr(c(1:2, NA, 4L)))
is.na(x) <- TRUE
expect_identical(x, new_vctr(rep(NA_integer_, 4)))
})
test_that("comparison functions remapped", {
scoped_global_bindings(
vec_proxy_compare.bizzaro = function(x) -vec_data(x)
)
x1 <- new_vctr(c(1, 2), class = "bizzaro")
x2 <- new_vctr(2, class = "bizzaro")
expect_equal(order(x1), c(2L, 1L))
expect_equal(x1 < x2, c(FALSE, FALSE))
expect_equal(x1 <= x2, c(FALSE, TRUE))
expect_equal(x1 > x2, c(TRUE, FALSE))
expect_equal(x1 >= x2, c(TRUE, TRUE))
})
test_that("operators remapped", {
scoped_global_bindings(
vec_arith.bizzaro = function(op, x, y) 1L
)
x <- new_vctr(c(1, 2), class = "bizzaro")
expect_equal(x + 1, 1L)
expect_equal(x - 1, 1L)
expect_equal(x * 1, 1L)
expect_equal(x / 1, 1L)
expect_equal(x ^ 1, 1L)
expect_equal(x %% 1, 1L)
expect_equal(x %/% 1, 1L)
expect_equal(x & 1, 1L)
expect_equal(x | 1, 1L)
expect_equal(!x, 1L)
expect_equal(+x, 1L)
expect_equal(-x, 1L)
})
test_that("math functions overridden", {
scoped_global_bindings(
vec_math.bizzaro = function(fn, x, ...) vec_math_base(fn, 2L)
)
x <- new_vctr(c(1, NA), class = "bizzaro")
expect_equal(mean(x), 2L)
expect_equal(sum(x), 2L)
expect_equal(is.finite(x), TRUE)
expect_equal(is.infinite(x), FALSE)
expect_equal(is.nan(x), FALSE)
})
test_that("diff matches base R", {
scoped_global_bindings(
vec_arith.vctrs_minus = function(op, x, y) vec_arith_base(op, x, y)
)
x1 <- cumsum(cumsum(1:10))
x2 <- new_vctr(x1, class = "vctrs_minus")
expect_equal(diff(x2), diff(x1))
expect_equal(diff(x2, lag = 2L), diff(x1, lag = 2L))
expect_equal(diff(x2, differences = 2L), diff(x1, differences = 2L))
expect_equal(diff(x2, lag = 11), x2[0L])
expect_equal(diff(x2, differences = 11), x2[0L])
})
# names -------------------------------------------------------------------
test_that("all elements must be named if any are named", {
expect_error(new_vctr(setNames(1:2, c("a", NA))), "named")
expect_error(new_vctr(setNames(1:2, c("a", ""))), "named")
})
test_that("can not provide invalid names", {
x <- new_vctr(c(a = 1, b = 2))
expect_error(names(x) <- "x", "length")
expect_error(names(x) <- c("x", NA), "named")
expect_error(names(x) <- c("x", ""), "named")
expect_error(names(x) <- c("x", "y", "z"), "length")
expect_error(names(x) <- NULL, NA)
})
test_that("can use [ and [[ with names", {
scoped_global_bindings(
vec_ptype2.vctrs_vctr = function(...) dbl(),
vec_ptype2.double.vctrs_vctr = function(...) dbl()
)
x <- new_vctr(c(a = 1, b = 2))
expect_equal(x["b"], new_vctr(c(b = 2)))
expect_equal(x[["b"]], new_vctr(2)) # [[ drops names
x[["c"]] <- 3
expect_equal(x[["c"]], new_vctr(3))
x["d"] <- 4
expect_equal(x[["d"]], new_vctr(4))
})
test_that("can use [ and [[ with names - list vctr", {
scoped_global_bindings(
vec_ptype2.vctrs_vctr = function(...) list(),
vec_ptype2.list.vctrs_vctr = function(...) list()
)
y <- new_vctr(list(a = 1, b = 2))
y[["c"]] <- 3
expect_equal(y[["c"]], 3)
y["d"] <- list(4)
expect_equal(y[["d"]], 4)
})
test_that("can use [[<- to replace n-dimensional elements", {
scoped_global_bindings(
vec_restore.vctrs_mtrx = function(x, to, ...) x,
vec_ptype2.double.vctrs_mtrx = function(...) dbl(),
vec_ptype2.vctrs_mtrx = function(...) dbl()
)
x <- new_vctr(rep(1, times = 4), dim = c(2, 2), class = "vctrs_mtrx")
x[[2, 2]] <- 4
expect_equal(x[[2, 2]], 4)
})
test_that("subsetting preserves attributes", {
x <- new_vctr(c(a = 1, b = 2))
attr(x, "extra") <- TRUE
y <- x[1]
expect_equal(attr(x, "extra"), TRUE)
})
test_that("$ inherits from underlying vector", {
x1 <- new_vctr(c(a = 1, b = 2))
expect_error(x1$a, "atomic vectors")
expect_error(x1$a <- 2, "atomic vectors")
x2 <- new_vctr(list(a = 1, b = 2))
expect_equal(x2$a, 1)
x2$a <- 10
expect_equal(x2$a, 10)
})
# unsupported/unimplemented operations --------------------------------------
test_that("can't touch protected attributes", {
x <- new_vctr(1:4)
expect_error(dim(x) <- c(2, 2), class = "vctrs_error_unsupported")
expect_error(dimnames(x) <- list("x"), class = "vctrs_error_unsupported")
expect_error(levels(x), class = "vctrs_error_unsupported")
expect_error(levels(x) <- "x", class = "vctrs_error_unsupported")
# but it's ok to set names to NULL; this happens at least in vec_c
# and maybe elsewhere. We may need to back off on this level of
# strictness in the future
expect_error(names(x) <- NULL, NA)
})
test_that("summary is unimplemented", {
x <- new_vctr(1:4)
expect_error(summary(x), class = "vctrs_error_unimplemented")
})
# hidden class ------------------------------------------------------------
# We can't construct classes in test because the methods are not found
# when vctr generics call other generics. Instead we rely on a very simple
# class implemented in vctr.R
test_that("class preserved when subsetting", {
h <- new_hidden(1:4)
expect_s3_class(h, "hidden")
expect_s3_class(h[1], "hidden")
expect_s3_class(h[[1]], "hidden")
expect_s3_class(rep(h[1], 2), "hidden")
expect_s3_class(as.list(h)[[1]], "hidden")
length(h) <- 3
expect_s3_class(h, "hidden")
})
test_that("RHS cast when using subset assign", {
scoped_hidden()
h <- new_hidden(1)
expect_error(h[[1]] <- "x", class = "vctrs_error_incompatible_type")
expect_error(h[1] <- "x", class = "vctrs_error_incompatible_type")
h[2] <- 1
expect_equal(h, new_hidden(c(1, 1)))
h[[2]] <- 2
expect_equal(h, new_hidden(c(1, 2)))
})
test_that("c passes on to vec_c", {
scoped_hidden()
h <- new_hidden(1)
expect_equal(c(h), h)
expect_equal(c(h, NULL), h)
expect_equal(c(h, 1), rep(h, 2))
expect_equal(c(h, h), rep(h, 2))
})
test_that("summaries preserve class", {
h <- new_hidden(c(1, 2))
expect_equal(sum(h), new_hidden(3))
expect_equal(mean(h), new_hidden(1.5))
})
test_that("methods using vec_proxy_compare agree with base", {
h <- new_hidden(c(1:10))
h_na <- new_hidden(c(NA, 1:10))
expect_agree <- function(f, x, na.rm = FALSE) {
f <- enexpr(f)
expect_equal(vec_data((!!f)(x, na.rm = na.rm)), (!!f)(vec_data(x), na.rm = na.rm))
}
expect_agree(min, h)
expect_agree(max, h)
expect_agree(range, h)
expect_agree(min, h_na)
expect_agree(max, h_na)
expect_agree(range, h_na)
expect_agree(min, h_na, na.rm = TRUE)
expect_agree(max, h_na, na.rm = TRUE)
expect_agree(range, h_na, na.rm = TRUE)
})
test_that("can put in data frame", {
h <- new_hidden(1:4)
expect_named(as.data.frame(h), "h")
expect_named(data.frame(x = h), "x")
})
test_that("base coercions default to vec_cast", {
scoped_hidden()
h <- new_hidden(1)
expect_error(as.character(h), class = "vctrs_error_incompatible_cast")
expect_error(as.integer(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.factor(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.ordered(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.difftime(h), class = "vctrs_error_incompatible_cast")
expect_equal(as.logical(h), TRUE)
expect_equal(as.double(h), 1)
})
test_that("default print and str methods are useful", {
h <- new_hidden(1:4)
expect_known_output(
{
print(h)
cat("\n")
print(h[0])
cat("\n")
str(h)
},
file = "test-vctr-print.txt",
)
})
test_that("default print method shows names", {
h <- new_hidden(c(A = 1, B = 2, C = 3))
expect_known_output(
{
print(h)
},
file = "test-vctr-print-names.txt",
)
})
test_that("can't transpose", {
h <- new_hidden(1:4)
expect_error(t(h), class = "vctrs_error_unsupported")
})
test_that("shaped vctrs can be cast to data frames", {
x <- new_vctr(1:4, dim = 4)
expect_identical(as.data.frame(x), data.frame(V1 = 1:4))
x <- new_vctr(1:4, dim = c(2, 2))
expect_identical(as.data.frame(x), data.frame(V1 = 1:2, V2 = 3:4))
})
# slicing -----------------------------------------------------------------
test_that("additional subscripts are handled (#269)", {
new_2d <- function(.data, dim) {
vctrs::new_vctr(.data, dim = dim, class = "vctrs_2d")
}
x <- new_2d(c(1, 2), dim = c(2L, 1L))
expect_identical(x[1], new_2d(1, dim = c(1, 1)))
expect_identical(x[1, 1], new_2d(1, dim = c(1, 1)))
expect_identical(x[, 1], new_2d(c(1, 2), dim = c(2, 1)))
})
# summary generics --------------------------------------------------------
test_that("na.rm is forwarded to summary generics", {
x <- new_vctr(dbl(1, 2, NA))
expect_identical(mean(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(mean(x, na.rm = TRUE), new_vctr(1.5))
expect_identical(min(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(min(x, na.rm = TRUE), new_vctr(1))
expect_identical(max(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(max(x, na.rm = TRUE), new_vctr(2))
x <- new_vctr(lgl(TRUE, NA))
expect_identical(all(x, na.rm = FALSE), lgl(NA))
expect_identical(all(x, na.rm = TRUE), TRUE)
})
test_that("Summary generics behave identically to base for empty vctrs (#88)", {
expect_warning(
expect_identical(
new_vctr(max(numeric())),
max(new_vctr(numeric()))
)
)
expect_warning(
expect_identical(
new_vctr(min(numeric())),
min(new_vctr(numeric()))
)
)
expect_warning(
expect_identical(
new_vctr(range(numeric())),
range(new_vctr(numeric()))
)
)
expect_identical(
new_vctr(prod(numeric())),
prod(new_vctr(numeric()))
)
expect_identical(
new_vctr(sum(numeric())),
sum(new_vctr(numeric()))
)
expect_identical(
new_vctr(cummax(numeric())),
cummax(new_vctr(numeric()))
)
expect_identical(
new_vctr(cummin(numeric())),
cummin(new_vctr(numeric()))
)
expect_identical(
new_vctr(cumprod(numeric())),
cumprod(new_vctr(numeric()))
)
expect_identical(
new_vctr(cumsum(numeric())),
cumsum(new_vctr(numeric()))
)
expect_identical(
new_vctr(mean(numeric())),
mean(new_vctr(numeric()))
)
})
test_that("generic predicates return logical vectors (#251)", {
x <- new_vctr(c(1, 2))
expect_identical(is.finite(x), c(TRUE, TRUE))
expect_identical(is.infinite(x), c(FALSE, FALSE))
expect_identical(is.nan(x), c(FALSE, FALSE))
x <- new_vctr(TRUE)
expect_identical(any(x), TRUE)
expect_identical(all(x), TRUE)
})
| /packrat/lib/x86_64-pc-linux-gnu/3.6.1/vctrs/tests/testthat/test-type-vctr.R | no_license | josehur/test_rstudio | R | false | false | 13,956 | r | context("test-type-vctr")
test_that("constructor sets attributes", {
x <- new_vctr(1:4, class = "x", x = 1)
expect_equal(x, structure(1:4, class = c("x", "vctrs_vctr"), x = 1))
})
test_that(".data must be a vector", {
expect_error(new_vctr(mean), "vector type")
})
test_that("attributes other than names are ignored", {
out <- new_vctr(structure(1, a = 1))
expect_null(attributes(out)$a)
})
test_that("default format method is internal", {
x <- new_vctr(1, class = "x")
expect_equal(format(x), format(x))
})
test_that("vctr class is proxied", {
expect_identical(vec_proxy(new_vctr(1:3)), new_vctr(1:3))
expect_identical(vec_proxy(new_vctr(as.list(1:3))), unclass(new_vctr(as.list(1:3))))
expect_true(vec_is(new_vctr(as.list(1:3))))
})
test_that("attributes must be named", {
expect_error(vec_set_attributes(1, list(1)), "must be named")
expect_error(vec_set_attributes(1, list(y = 1, 2)), "2 does not")
})
test_that("can strip all attributes without adding new ones", {
expect_equal(vec_set_attributes(structure(1, a = 1), NULL), 1)
})
# Cast/restore ------------------------------------------------------------
test_that("cast to NULL returns x", {
x <- new_vctr(1, class = "x")
expect_equal(vec_cast(NULL, x), NULL)
})
test_that("cast succeeds if attributes equal", {
x1 <- new_vctr(1, class = "x", a = 1, b = 2)
x2 <- new_vctr(2, class = "x", a = 1, b = 2)
expect_equal(vec_cast(x1, x2), x1)
expect_equal(vec_cast(x2, x1), x2)
})
test_that("and fails if attributes are different", {
x1 <- new_vctr(1, class = "x", a = 1, b = 2)
x2 <- new_vctr(2, class = "x", a = 2, b = 2)
expect_error(vec_cast(x1, x2), class = "vctrs_error_incompatible_cast")
})
test_that("restoring to atomic vector of same type preserves attributes", {
x1 <- new_vctr(1, class = "x")
x2 <- new_vctr(2, class = "x")
expect_equal(vec_restore(2, x1), x2)
})
test_that("restoring to atomic vector of different type throws error", {
x1 <- new_vctr(1, class = "x")
expect_error(vec_restore("x", x1), class = "vctrs_error_incompatible_cast")
})
test_that("base coercion methods mapped to vec_cast", {
x <- new_vctr(1)
expect_error(as.logical(x), class = "vctrs_error_incompatible_cast")
expect_error(as.integer(x), class = "vctrs_error_incompatible_cast")
expect_error(as.logical(x), class = "vctrs_error_incompatible_cast")
expect_error(as.double(x), class = "vctrs_error_incompatible_cast")
expect_error(as.character(x), class = "vctrs_error_incompatible_cast")
expect_error(as.Date(x), class = "vctrs_error_incompatible_cast")
expect_error(as.POSIXct(x), class = "vctrs_error_incompatible_cast")
expect_equal(as.list(x), list(x))
})
test_that("as.data.frame creates data frame", {
x <- new_vctr(1:3)
df <- as.data.frame(x)
expect_s3_class(df, "data.frame")
expect_equal(nrow(df), 3)
expect_named(df, "x")
})
# equality + comparison + arith + math ---------------------------------------
test_that("equality functions remapped", {
x <- new_vctr(c(1, 1, NA))
expect_error(x == 1, class = "vctrs_error_incompatible_type")
expect_error(x != 1, class = "vctrs_error_incompatible_type")
expect_equal(is.na(x), c(FALSE, FALSE, TRUE))
expect_true(anyNA(x))
expect_equal(unique(x), new_vctr(c(1, NA)))
expect_equal(duplicated(x), c(FALSE, TRUE, FALSE))
expect_true(anyDuplicated(x))
})
test_that("is.na<-() supported", {
x <- new_vctr(1:4)
is.na(x) <- c(FALSE, FALSE, TRUE, NA)
expect_identical(x, new_vctr(c(1:2, NA, 4L)))
is.na(x) <- TRUE
expect_identical(x, new_vctr(rep(NA_integer_, 4)))
})
test_that("comparison functions remapped", {
scoped_global_bindings(
vec_proxy_compare.bizzaro = function(x) -vec_data(x)
)
x1 <- new_vctr(c(1, 2), class = "bizzaro")
x2 <- new_vctr(2, class = "bizzaro")
expect_equal(order(x1), c(2L, 1L))
expect_equal(x1 < x2, c(FALSE, FALSE))
expect_equal(x1 <= x2, c(FALSE, TRUE))
expect_equal(x1 > x2, c(TRUE, FALSE))
expect_equal(x1 >= x2, c(TRUE, TRUE))
})
test_that("operators remapped", {
scoped_global_bindings(
vec_arith.bizzaro = function(op, x, y) 1L
)
x <- new_vctr(c(1, 2), class = "bizzaro")
expect_equal(x + 1, 1L)
expect_equal(x - 1, 1L)
expect_equal(x * 1, 1L)
expect_equal(x / 1, 1L)
expect_equal(x ^ 1, 1L)
expect_equal(x %% 1, 1L)
expect_equal(x %/% 1, 1L)
expect_equal(x & 1, 1L)
expect_equal(x | 1, 1L)
expect_equal(!x, 1L)
expect_equal(+x, 1L)
expect_equal(-x, 1L)
})
test_that("math functions overridden", {
scoped_global_bindings(
vec_math.bizzaro = function(fn, x, ...) vec_math_base(fn, 2L)
)
x <- new_vctr(c(1, NA), class = "bizzaro")
expect_equal(mean(x), 2L)
expect_equal(sum(x), 2L)
expect_equal(is.finite(x), TRUE)
expect_equal(is.infinite(x), FALSE)
expect_equal(is.nan(x), FALSE)
})
test_that("diff matches base R", {
scoped_global_bindings(
vec_arith.vctrs_minus = function(op, x, y) vec_arith_base(op, x, y)
)
x1 <- cumsum(cumsum(1:10))
x2 <- new_vctr(x1, class = "vctrs_minus")
expect_equal(diff(x2), diff(x1))
expect_equal(diff(x2, lag = 2L), diff(x1, lag = 2L))
expect_equal(diff(x2, differences = 2L), diff(x1, differences = 2L))
expect_equal(diff(x2, lag = 11), x2[0L])
expect_equal(diff(x2, differences = 11), x2[0L])
})
# names -------------------------------------------------------------------
test_that("all elements must be named if any are named", {
expect_error(new_vctr(setNames(1:2, c("a", NA))), "named")
expect_error(new_vctr(setNames(1:2, c("a", ""))), "named")
})
test_that("can not provide invalid names", {
x <- new_vctr(c(a = 1, b = 2))
expect_error(names(x) <- "x", "length")
expect_error(names(x) <- c("x", NA), "named")
expect_error(names(x) <- c("x", ""), "named")
expect_error(names(x) <- c("x", "y", "z"), "length")
expect_error(names(x) <- NULL, NA)
})
test_that("can use [ and [[ with names", {
scoped_global_bindings(
vec_ptype2.vctrs_vctr = function(...) dbl(),
vec_ptype2.double.vctrs_vctr = function(...) dbl()
)
x <- new_vctr(c(a = 1, b = 2))
expect_equal(x["b"], new_vctr(c(b = 2)))
expect_equal(x[["b"]], new_vctr(2)) # [[ drops names
x[["c"]] <- 3
expect_equal(x[["c"]], new_vctr(3))
x["d"] <- 4
expect_equal(x[["d"]], new_vctr(4))
})
test_that("can use [ and [[ with names - list vctr", {
scoped_global_bindings(
vec_ptype2.vctrs_vctr = function(...) list(),
vec_ptype2.list.vctrs_vctr = function(...) list()
)
y <- new_vctr(list(a = 1, b = 2))
y[["c"]] <- 3
expect_equal(y[["c"]], 3)
y["d"] <- list(4)
expect_equal(y[["d"]], 4)
})
test_that("can use [[<- to replace n-dimensional elements", {
scoped_global_bindings(
vec_restore.vctrs_mtrx = function(x, to, ...) x,
vec_ptype2.double.vctrs_mtrx = function(...) dbl(),
vec_ptype2.vctrs_mtrx = function(...) dbl()
)
x <- new_vctr(rep(1, times = 4), dim = c(2, 2), class = "vctrs_mtrx")
x[[2, 2]] <- 4
expect_equal(x[[2, 2]], 4)
})
test_that("subsetting preserves attributes", {
x <- new_vctr(c(a = 1, b = 2))
attr(x, "extra") <- TRUE
y <- x[1]
expect_equal(attr(x, "extra"), TRUE)
})
test_that("$ inherits from underlying vector", {
x1 <- new_vctr(c(a = 1, b = 2))
expect_error(x1$a, "atomic vectors")
expect_error(x1$a <- 2, "atomic vectors")
x2 <- new_vctr(list(a = 1, b = 2))
expect_equal(x2$a, 1)
x2$a <- 10
expect_equal(x2$a, 10)
})
# unsupported/unimplemented operations --------------------------------------
test_that("can't touch protected attributes", {
x <- new_vctr(1:4)
expect_error(dim(x) <- c(2, 2), class = "vctrs_error_unsupported")
expect_error(dimnames(x) <- list("x"), class = "vctrs_error_unsupported")
expect_error(levels(x), class = "vctrs_error_unsupported")
expect_error(levels(x) <- "x", class = "vctrs_error_unsupported")
# but it's ok to set names to NULL; this happens at least in vec_c
# and maybe elsewhere. We may need to back off on this level of
# strictness in the future
expect_error(names(x) <- NULL, NA)
})
test_that("summary is unimplemented", {
x <- new_vctr(1:4)
expect_error(summary(x), class = "vctrs_error_unimplemented")
})
# hidden class ------------------------------------------------------------
# We can't construct classes in test because the methods are not found
# when vctr generics call other generics. Instead we rely on a very simple
# class implemented in vctr.R
test_that("class preserved when subsetting", {
h <- new_hidden(1:4)
expect_s3_class(h, "hidden")
expect_s3_class(h[1], "hidden")
expect_s3_class(h[[1]], "hidden")
expect_s3_class(rep(h[1], 2), "hidden")
expect_s3_class(as.list(h)[[1]], "hidden")
length(h) <- 3
expect_s3_class(h, "hidden")
})
test_that("RHS cast when using subset assign", {
scoped_hidden()
h <- new_hidden(1)
expect_error(h[[1]] <- "x", class = "vctrs_error_incompatible_type")
expect_error(h[1] <- "x", class = "vctrs_error_incompatible_type")
h[2] <- 1
expect_equal(h, new_hidden(c(1, 1)))
h[[2]] <- 2
expect_equal(h, new_hidden(c(1, 2)))
})
test_that("c passes on to vec_c", {
scoped_hidden()
h <- new_hidden(1)
expect_equal(c(h), h)
expect_equal(c(h, NULL), h)
expect_equal(c(h, 1), rep(h, 2))
expect_equal(c(h, h), rep(h, 2))
})
test_that("summaries preserve class", {
h <- new_hidden(c(1, 2))
expect_equal(sum(h), new_hidden(3))
expect_equal(mean(h), new_hidden(1.5))
})
test_that("methods using vec_proxy_compare agree with base", {
h <- new_hidden(c(1:10))
h_na <- new_hidden(c(NA, 1:10))
expect_agree <- function(f, x, na.rm = FALSE) {
f <- enexpr(f)
expect_equal(vec_data((!!f)(x, na.rm = na.rm)), (!!f)(vec_data(x), na.rm = na.rm))
}
expect_agree(min, h)
expect_agree(max, h)
expect_agree(range, h)
expect_agree(min, h_na)
expect_agree(max, h_na)
expect_agree(range, h_na)
expect_agree(min, h_na, na.rm = TRUE)
expect_agree(max, h_na, na.rm = TRUE)
expect_agree(range, h_na, na.rm = TRUE)
})
test_that("can put in data frame", {
h <- new_hidden(1:4)
expect_named(as.data.frame(h), "h")
expect_named(data.frame(x = h), "x")
})
test_that("base coercions default to vec_cast", {
scoped_hidden()
h <- new_hidden(1)
expect_error(as.character(h), class = "vctrs_error_incompatible_cast")
expect_error(as.integer(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.factor(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.ordered(h), class = "vctrs_error_incompatible_cast")
expect_error(generics::as.difftime(h), class = "vctrs_error_incompatible_cast")
expect_equal(as.logical(h), TRUE)
expect_equal(as.double(h), 1)
})
test_that("default print and str methods are useful", {
h <- new_hidden(1:4)
expect_known_output(
{
print(h)
cat("\n")
print(h[0])
cat("\n")
str(h)
},
file = "test-vctr-print.txt",
)
})
test_that("default print method shows names", {
h <- new_hidden(c(A = 1, B = 2, C = 3))
expect_known_output(
{
print(h)
},
file = "test-vctr-print-names.txt",
)
})
test_that("can't transpose", {
h <- new_hidden(1:4)
expect_error(t(h), class = "vctrs_error_unsupported")
})
test_that("shaped vctrs can be cast to data frames", {
x <- new_vctr(1:4, dim = 4)
expect_identical(as.data.frame(x), data.frame(V1 = 1:4))
x <- new_vctr(1:4, dim = c(2, 2))
expect_identical(as.data.frame(x), data.frame(V1 = 1:2, V2 = 3:4))
})
# slicing -----------------------------------------------------------------
test_that("additional subscripts are handled (#269)", {
new_2d <- function(.data, dim) {
vctrs::new_vctr(.data, dim = dim, class = "vctrs_2d")
}
x <- new_2d(c(1, 2), dim = c(2L, 1L))
expect_identical(x[1], new_2d(1, dim = c(1, 1)))
expect_identical(x[1, 1], new_2d(1, dim = c(1, 1)))
expect_identical(x[, 1], new_2d(c(1, 2), dim = c(2, 1)))
})
# summary generics --------------------------------------------------------
test_that("na.rm is forwarded to summary generics", {
x <- new_vctr(dbl(1, 2, NA))
expect_identical(mean(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(mean(x, na.rm = TRUE), new_vctr(1.5))
expect_identical(min(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(min(x, na.rm = TRUE), new_vctr(1))
expect_identical(max(x, na.rm = FALSE), new_vctr(dbl(NA)))
expect_identical(max(x, na.rm = TRUE), new_vctr(2))
x <- new_vctr(lgl(TRUE, NA))
expect_identical(all(x, na.rm = FALSE), lgl(NA))
expect_identical(all(x, na.rm = TRUE), TRUE)
})
test_that("Summary generics behave identically to base for empty vctrs (#88)", {
expect_warning(
expect_identical(
new_vctr(max(numeric())),
max(new_vctr(numeric()))
)
)
expect_warning(
expect_identical(
new_vctr(min(numeric())),
min(new_vctr(numeric()))
)
)
expect_warning(
expect_identical(
new_vctr(range(numeric())),
range(new_vctr(numeric()))
)
)
expect_identical(
new_vctr(prod(numeric())),
prod(new_vctr(numeric()))
)
expect_identical(
new_vctr(sum(numeric())),
sum(new_vctr(numeric()))
)
expect_identical(
new_vctr(cummax(numeric())),
cummax(new_vctr(numeric()))
)
expect_identical(
new_vctr(cummin(numeric())),
cummin(new_vctr(numeric()))
)
expect_identical(
new_vctr(cumprod(numeric())),
cumprod(new_vctr(numeric()))
)
expect_identical(
new_vctr(cumsum(numeric())),
cumsum(new_vctr(numeric()))
)
expect_identical(
new_vctr(mean(numeric())),
mean(new_vctr(numeric()))
)
})
test_that("generic predicates return logical vectors (#251)", {
x <- new_vctr(c(1, 2))
expect_identical(is.finite(x), c(TRUE, TRUE))
expect_identical(is.infinite(x), c(FALSE, FALSE))
expect_identical(is.nan(x), c(FALSE, FALSE))
x <- new_vctr(TRUE)
expect_identical(any(x), TRUE)
expect_identical(all(x), TRUE)
})
|
\name{netsnp}
\alias{netsnp}
\title{Reconstructs intra- and inter- chromosomal conditional interactions among genetic loci}
\description{
This is one of the main functions of the \pkg{netgwas} package. This function can be used to reconstruct the intra- and inter-chromosomal interactions among genetic loci in diploids and polyploids. The input data can be any biparental genotype data containing at least two genotype states. Two methods are available to reconstruct the network, namely (1) an approximation method, and (2) Gibbs sampling within the Gaussian copula graphical model. Both methods are able to deal with missing genotypes.
}
\usage{
netsnp(data, method = "gibbs", rho = NULL, n.rho = NULL, rho.ratio = NULL,
ncores = 1, em.iter = 5, em.tol = .001, verbose = TRUE)
}
\arguments{
\item{data}{ An (\eqn{n \times p}) matrix or a \code{data.frame} corresponding to a genotype data matrix (\eqn{n} is the sample size and \eqn{p} is the number of variables). It also could be an object of class \code{"simgeno"}. Input data can contain missing values.}
\item{method}{ Reconstructs intra- and inter- chromosomal conditional interactions (epistatic selection) network with three methods: "gibbs", "approx", and "npn". For a medium (~500) and a large number of variables we would recommend to choose "gibbs" and "approx", respectively. For a very large number of variables (> 2000) choose "npn". The default method is "gibbs".}
\item{rho}{ A decreasing sequence of non-negative numbers that control the sparsity level. Leaving the input as \code{rho = NULL}, the program automatically computes a sequence of \code{rho} based on \code{n.rho} and \code{rho.ratio}. Users can also supply a decreasing sequence values to override this.}
\item{n.rho}{ The number of regularization parameters. The default value is \code{10}.}
\item{rho.ratio}{Determines the distance between the elements of the \code{rho} sequence. A small value of \code{rho.ratio} results in a large distance between the elements of the \code{rho} sequence, and a large value of \code{rho.ratio} results in a small distance between the elements of \code{rho}. If kept as NULL, the program internally chooses a value.}
\item{ncores}{ The number of cores to use for the calculations. Using \code{ncores = "all"} automatically detects number of available cores and runs the computations in parallel on (available cores - 1).}
\item{em.iter}{ The number of EM iterations. The default value is 5.}
\item{em.tol}{ A criteria to stop the EM iterations. The default value is .001. }
\item{verbose}{Providing a detail message for tracing output. The default value is \code{TRUE}.}
}
\details{
Viability is a phenotype that can be considered. This function detects the conditional dependent short- and long-range linkage disequilibrium structure of genomes and thus reveals aberrant marker-marker associations that are due to epistatic selection.
This function can be used to estimate conditional independence relationships between partially observed data that do not follow the Gaussianity assumption (e.g. continuous non-Gaussian, discrete, or mixed datasets).
}
\value{
An object with S3 class \code{"netgwas"} is returned:
\item{Theta}{ A list of estimated p by p precision matrices that show the conditional independence relationships patterns among genetic loci.}
\item{path}{ A list of estimated p by p adjacency matrices. This is the graph path corresponding to \code{Theta}.}
\item{ES}{ A list of estimated p by p conditional expectation corresponding to \code{rho}.}
\item{Z}{ A list of n by p transformed data based on Gaussian copula. }
\item{rho}{ A \code{n.rho} dimensional vector containing the penalty terms.}
\item{loglik }{ A \code{n.rho} dimensional vector containing the maximized log-likelihood values along the graph path. }
\item{data}{ The \eqn{n} by \eqn{p} input data matrix. }
}
\note{ This function estimates a graph path. To select an optimal graph, please refer to \code{\link{selectnet}}.}
\author{
Pariya Behrouzi and Ernst C. Wit \cr
Maintainers: Pariya Behrouzi \email{pariya.behrouzi@gmail.com}\cr
}
\references{
1. Behrouzi, P., and Wit, E. C. (2019). Detecting epistatic selection with partially observed genotype data by using copula graphical models. Journal of the Royal Statistical Society: Series C (Applied Statistics), 68(1), 141-160. \cr
2. Behrouzi, P., and Wit, E. C. (2017c). netgwas: An R Package for Network-Based Genome-Wide Association Studies. arXiv preprint, arXiv:1710.01236.
3. D. Witten and J. Friedman. New insights and faster computations for the graphical lasso. \emph{Journal of Computational and Graphical Statistics}, to appear, 2011. \cr
4. Guo, Jian, et al. "Graphical models for ordinal data." Journal of Computational and Graphical Statistics 24.1 (2015): 183-204. \cr
}
\seealso{\code{\link{selectnet}}}
\examples{
\dontshow{
D <- simgeno(p=20, n=10, k= 3, adjacent = 1, alpha = 0.06 , beta = 0.3)
out <- netsnp(data = D$data, n.rho= 2, ncores= 1)
plot(out)
sel <- selectnet(out)
}
\donttest{
data(CviCol)
out <- netsnp(CviCol); out
plot(out)
#select optimal graph
epi <- selectnet(out)
plot(epi, vis="CI", xlab="markers", ylab="markers",
n.mem = c(24,14,17,16,19), vertex.size=4)
#Visualize interactive plot of the selected network
#Different colors for each chromosome
cl <- c(rep("red", 24), rep("white",14), rep("tan1",17),
rep("gray",16), rep("lightblue2",19))
plot(epi, vis="interactive", vertex.color= cl)
#Partial correlations between markers on genome
image(as.matrix(epi$par.cor), xlab="markers", ylab="markers", sub="")
}
} | /man/netsnp.Rd | no_license | cran/netgwas | R | false | false | 5,669 | rd | \name{netsnp}
\alias{netsnp}
\title{Reconstructs intra- and inter- chromosomal conditional interactions among genetic loci}
\description{
This is one of the main functions of the \pkg{netgwas} package. This function can be used to reconstruct the intra- and inter-chromosomal interactions among genetic loci in diploids and polyploids. The input data can be belong to any biparental genotype data which contains at least two genotype states. Two methods are available to reconstruct the network, namely (1) approximation method, and (2) gibbs sampling within the Gaussian copula graphical model. Both methods are able to deal with missing genotypes.
}
\usage{
netsnp(data, method = "gibbs", rho = NULL, n.rho = NULL, rho.ratio = NULL,
ncores = 1, em.iter = 5, em.tol = .001, verbose = TRUE)
}
\arguments{
\item{data}{ An (\eqn{n \times p}) matrix or a \code{data.frame} corresponding to a genotype data matrix (\eqn{n} is the sample size and \eqn{p} is the number of variables). It also could be an object of class \code{"simgeno"}. Input data can contain missing values.}
\item{method}{ Reconstructs intra- and inter- chromosomal conditional interactions (epistatic selection) network with three methods: "gibbs", "approx", and "npn". For a medium (~500) and a large number of variables we would recommend to choose "gibbs" and "approx", respectively. For a very large number of variables (> 2000) choose "npn". The default method is "gibbs".}
\item{rho}{ A decreasing sequence of non-negative numbers that control the sparsity level. Leaving the input as \code{rho = NULL}, the program automatically computes a sequence of \code{rho} based on \code{n.rho} and \code{rho.ratio}. Users can also supply a decreasing sequence values to override this.}
\item{n.rho}{ The number of regularization parameters. The default value is \code{10}.}
\item{rho.ratio}{Determines the distance between the elements of \code{rho} sequence. A small value of \code{rho.ratio} results in a large distance between the elements of \code{rho} sequence. And a large value of \code{rho.ratio} results into a small distance between elements of \code{rho}. If keep it as NULL the program internally chooses a value.}
\item{ncores}{ The number of cores to use for the calculations. Using \code{ncores = "all"} automatically detects number of available cores and runs the computations in parallel on (available cores - 1).}
\item{em.iter}{ The number of EM iterations. The default value is 5.}
\item{em.tol}{ A criteria to stop the EM iterations. The default value is .001. }
\item{verbose}{Providing a detail message for tracing output. The default value is \code{TRUE}.}
}
\details{
Viability is a phenotype that can be considered. This function detects the conditional dependent short- and long-range linkage disequilibrium structure of genomes and thus reveals aberrant marker-marker associations that are due to epistatic selection.
This function can be used to estimate conditional independence relationships between partially observed data that not follow Gaussianity assumption (e.g. continuous non-Gaussian, discrete, or mixed dataset).
}
\value{
An object with S3 class \code{"netgwas"} is returned:
\item{Theta}{ A list of estimated p by p precision matrices that show the conditional independence relationships patterns among genetic loci.}
\item{path}{ A list of estimated p by p adjacency matrices. This is the graph path corresponding to \code{Theta}.}
\item{ES}{ A list of estimated p by p conditional expectation corresponding to \code{rho}.}
\item{Z}{ A list of n by p transformed data based on Gaussian copula. }
\item{rho}{ A \code{n.rho} dimensional vector containing the penalty terms.}
\item{loglik }{ A \code{n.rho} dimensional vector containing the maximized log-likelihood values along the graph path. }
\item{data}{ The \eqn{n} by \eqn{p} input data matrix. }
}
\note{ This function estimates a graph path . To select an optimal graph please refer to \code{\link{selectnet}}.}
\author{
Pariya Behrouzi and Ernst C. Wit \cr
Maintainers: Pariya Behrouzi \email{pariya.behrouzi@gmail.com}\cr
}
\references{
1. Behrouzi, P., and Wit, E. C. (2019). Detecting epistatic selection with partially observed genotype data by using copula graphical models. Journal of the Royal Statistical Society: Series C (Applied Statistics), 68(1), 141-160. \cr
2. Behrouzi, P., and Wit, E. C. (2017c). netgwas: An R Package for Network-Based Genome-Wide Association Studies. arXiv preprint, arXiv:1710.01236.
3. D. Witten and J. Friedman. New insights and faster computations for the graphical lasso. \emph{Journal of Computational and Graphical Statistics}, to appear, 2011. \cr
4. Guo, Jian, et al. "Graphical models for ordinal data." Journal of Computational and Graphical Statistics 24.1 (2015): 183-204. \cr
}
\seealso{\code{\link{selectnet}}}
\examples{
\dontshow{
D <- simgeno(p=20, n=10, k= 3, adjacent = 1, alpha = 0.06 , beta = 0.3)
out <- netsnp(data = D$data, n.rho= 2, ncores= 1)
plot(out)
sel <- selectnet(out)
}
\donttest{
data(CviCol)
out <- netsnp(CviCol); out
plot(out)
#select optimal graph
epi <- selectnet(out)
plot(epi, vis="CI", xlab="markers", ylab="markers",
n.mem = c(24,14,17,16,19), vertex.size=4)
#Visualize interactive plot of the selected network
#Different colors for each chromosome
cl <- c(rep("red", 24), rep("white",14), rep("tan1",17),
rep("gray",16), rep("lightblue2",19))
plot(epi, vis="interactive", vertex.color= cl)
#Partial correlations between markers on genome
image(as.matrix(epi$par.cor), xlab="markers", ylab="markers", sub="")
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitting_funcs.R
\name{mult_abund_probit}
\alias{mult_abund_probit}
\title{Perform RJMCMC for posterior sampling of multivariate cluster occurrence
model with Bernoulli observations}
\usage{
mult_abund_probit(data_list, prior_list, initial_list, block,
begin_group_update, update_omega, burn, iter)
}
\arguments{
\item{data_list}{A named list created of data items created from user data with the
function \code{multAbund::make_data_list}. This data will be used for model fitting.}
\item{prior_list}{A named list of prior parameters. See details.}
\item{initial_list}{A named list of initial values for the parameters (see details).
The functions \code{multAbund::sugs} or \code{multAbund::make_inits} can be used to create this
list.}
\item{block}{Number of iterations between Metropolis proposal adaptation.}
\item{begin_group_update}{The iteration at which the group clusters begin updating.
The RJMCMC often performs better when the chain is allowed to sample only the parameters
before the groups begin updating.}
\item{update_omega}{logical. Should omega be updated during the MCMC or remain fixed.}
\item{burn}{Number of burnin iterations that are discarded.}
\item{iter}{Number of iterations retained for posterior inference.}
}
\description{
Fit a Dirichlet Process random effect model for joint species distribution inference of binary occurrence data using
an RJMCMC procedure.
}
\details{
The \code{prior_list} argument needs to contain the following items:
\itemize{
\item a_alpha numeric. shape parameter for gamma prior on alpha,
\item b_alpha numeric. scale parameter for gamma prior on alpha,
\item Sigma_beta_inv numeric matrix. precision matrix for global regression coefficients, beta
\item mu_beta numeric vector. prior mean of beta parameters
\item phi_omega numeric. scale for half t/normal prior on delta variance parameter (omega),
\item df_omega numeric. degrees of freedom for half-t prior on omega (df_omega>=50 means a half-normal will be assumed)
\item phi_sigma numeric. scale parameter for half-t/normal prior for sigma parameters
\item df_sigma numeric. degrees of freedom for sigma prior (>=50 implies half-normal will be used)
}
}
\author{
Devin S. Johnson
}
| /man/mult_abund_probit.Rd | no_license | DataFusion18/multAbund | R | false | true | 2,282 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitting_funcs.R
\name{mult_abund_probit}
\alias{mult_abund_probit}
\title{Perform RJMCMC for posterior sampling of multivariate cluster occurence
model with Bernoulli observations}
\usage{
mult_abund_probit(data_list, prior_list, initial_list, block,
begin_group_update, update_omega, burn, iter)
}
\arguments{
\item{data_list}{A named list created of data items created from user data with the
function \code{multAbund::make_data_list}. This data will be used for model fitting.}
\item{prior_list}{A named list of prior parameters. See details.}
\item{initial_list}{A named list of initial values for the parameters (see details).
The functions \code{multAbund::sugs} or \code{multAbund::make_inits} can be used to create this
list.}
\item{block}{Number of iterations between Metropolis proposal adaptation.}
\item{begin_group_update}{The iteration at with the group clusters begin updating.
The RJMCMC often performs better when the chain is allowed to sample only the parameters
before the groups begin updating.}
\item{update_omega}{logical. Should omega be updated during the MCMC or remain fixed.}
\item{burn}{Number of burnin iterations that are discarded.}
\item{iter}{Number of iterations retained for posterior inference.}
}
\description{
Fit a Dirichlet Process random effect model for joint species distribution inference of binary occurence data using
a RJMCMC procedure.
}
\details{
The \code{prior_list} argument needs to contain the following items:
\itemize{
\item a_alpha numeric. shape parameter for gamma prior on alpha,
\item b_alpha numeric. scale parameter for gamma prior on alpha,
\item Sigma_beta_inv numeric matrix. precision matrix for global regression coefficients, beta
\item mu_beta numeric vector. prior mean of beta parameters
\item phi_omega numeric. scale for half t/normal prior on delta variance parameter (omega),
\item df_omega numeric. degrees of freedom for half-t prior on omega (df_omega>=50 means a half-normal will be assumed)
\item phi_sigma numeric. scale parameter for half-t/normal prior for sigma parameters
\item df_sigma numeric. degrees of freedom for sigma prior (>=50 implies half-normal will be used)
}
}
\author{
Devin S. Johnson
}
|
# Template script ({{...}} placeholders are substituted by the pipeline before
# this file is run): plots ROC curves for the input table and writes the
# per-curve AUCs to a tab-separated output file.
{{rimport}}('__init__.r', 'plot.r')

infile = {{i.infile | R}}
outfile = {{o.outfile | R}}
outdir = {{o.outdir | R}}
inopts = {{args.inopts | R}}
params = {{args.params | R}}
devpars = {{args.devpars | R}}
ggs = {{args.ggs | R}}

indata = read.table.inopts(infile, inopts)
# ask plot.roc to return AUC values so they can be written out below
params$returnAUC = T
plotfile = file.path(outdir, '{{i.infile | fn}}.roc.png')
aucs = plot.roc(indata, plotfile, stacked = F, params = params, ggs = ggs, devpars = devpars)
# transpose so each curve's AUC becomes one row
aucs = t(as.data.frame(aucs))
# 3-decimal formatting; row names identify the curves, no header line
write.table(pretty.numbers2(aucs, . = '%.3f'), outfile, sep = "\t", quote = F, col.names = F, row.names = T)
| /bioprocs/scripts/plot/pROC.r | permissive | LeaveYeah/bioprocs | R | false | false | 595 | r |
{{rimport}}('__init__.r', 'plot.r')
infile = {{i.infile | R}}
outfile = {{o.outfile | R}}
outdir = {{o.outdir | R}}
inopts = {{args.inopts | R}}
params = {{args.params | R}}
devpars = {{args.devpars | R}}
ggs = {{args.ggs | R}}
indata = read.table.inopts(infile, inopts)
params$returnAUC = T
plotfile = file.path(outdir, '{{i.infile | fn}}.roc.png')
aucs = plot.roc(indata, plotfile, stacked = F, params = params, ggs = ggs, devpars = devpars)
aucs = t(as.data.frame(aucs))
write.table(pretty.numbers2(aucs, . = '%.3f'), outfile, sep = "\t", quote = F, col.names = F, row.names = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcFST.R
\name{calcFST}
\alias{calcFST}
\title{Calculates SNP-level FST values given two populations}
\usage{
calcFST(genotype_file, fam_file, output_folder, output_file)
}
\arguments{
\item{genotype_file}{(char) path to PLINK-formatted SNP genotype data.}
\item{fam_file}{(char) path to PLINK population-coded fam file.}
\item{output_folder}{(char) path to output directory.}
\item{output_file}{(char) path to final GSEA-formatted input file.}
}
\value{
none
}
\description{
Calculates SNP-level FST values given two populations
}
| /man/calcFST.Rd | permissive | BaderLab/POPPATHR | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcFST.R
\name{calcFST}
\alias{calcFST}
\title{Calculates SNP-level FST values given two populations}
\usage{
calcFST(genotype_file, fam_file, output_folder, output_file)
}
\arguments{
\item{genotype_file}{(char) path to PLINK-formatted SNP genotype data.}
\item{fam_file}{(char) path to PLINK population-coded fam file.}
\item{output_folder}{(char) path to output directory.}
\item{output_file}{(char) path to final GSEA-formatted input file.}
}
\value{
none
}
\description{
Calculates SNP-level FST values given two populations
}
|
# Fuzzer-generated (AFL) regression input: calls the internal match_rows
# with extreme double values to check it runs without crashing; only the
# structure of the result is printed.
testlist <- list(A = structure(c(2.31584307023018e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
library(tidyverse)

# Paths: nested-CV results table in, PDF/PNG dot plots out.
f <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.csv"
p <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.pdf"
g <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.png"

# Read the file ONCE (the original read it twice) and keep only the
# test-set statistics, renamed for display.
CV_raw <- readr::read_csv(f) %>%
  dplyr::select(Accuracy = test_accuracy, Sensitivity = test_sensitivity,
                Specificity = test_specificity, PPV = test_PPV,
                AUC = test_AUC, F1 = test_f1)

# Long format for plotting: one row per (statistic, score) pair.
CV <- CV_raw %>%
  tidyr::gather(key = Statistic, value = Score)

# Mean accuracy across the outer CV folds.
mean(CV_raw$Accuracy)

# Fix the display order of the statistics.
CV$Statistic <- factor(CV$Statistic,
                       levels = c("Accuracy", "Sensitivity", "Specificity", "PPV", "AUC", "F1"))

# Dot plot with the mean (wide red bar) and 95% CI (narrow red bar).
# NOTE(review): `fun.y` is deprecated since ggplot2 3.3.0 (use `fun`), and
# mean_cl_normal requires the Hmisc package — kept as-is pending a check of
# the ggplot2 version in use.
ggplot(data = CV, mapping = aes(x = Statistic, y = Score)) +
  ylim(0, 1) +
  geom_dotplot(binaxis = 'y', stackdir = 'center', dotsize = 0.7) +
  stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
               width = 0.75, size = 1, linetype = "solid", colour = "red") +
  stat_summary(fun.data = mean_cl_normal, geom = "errorbar",
               width = (0.75 / 2), size = 0.5, linetype = "solid", colour = "red") +
  labs(y = "Score", x = "Statistic", title = "Elastic Net") +
  theme(text = element_text(size = 12), panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        plot.title = element_text(hjust = 0.5))

ggsave(p, width = 5, height = 4, units = "in")
ggsave(g, width = 5, height = 4, units = "in")
| /Plotting/BLSF_AGEs_ElasticNet_NestedCVResults.R | no_license | nathanpincus/PA_Virulence_Prediction | R | false | false | 1,705 | r | library(tidyverse)
# Load in table of nested CV results
f <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.csv"
p <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.pdf"
g <- "/path/to/BLSF_AGEs_ElasticNet_GSCV_NestedCVResults.png"
CV_raw <- readr::read_csv(f) %>%
dplyr::select(Accuracy = test_accuracy, Sensitivity = test_sensitivity, Specificity = test_specificity, PPV = test_PPV, AUC = test_AUC, F1 = test_f1)
CV <- readr::read_csv(f) %>%
dplyr::select(Accuracy = test_accuracy, Sensitivity = test_sensitivity, Specificity = test_specificity, PPV = test_PPV, AUC = test_AUC, F1 = test_f1) %>%
tidyr::gather(key = Statistic, value = Score)
# Mean accuracy
mean(CV_raw$Accuracy)
# Order variables
CV$Statistic <- factor(CV$Statistic,levels = c("Accuracy", "Sensitivity", "Specificity", "PPV", "AUC", "F1"))
# Dot Plot with Mean + 95% CI indicated
ggplot(data= CV, mapping = aes(x=Statistic, y=Score)) +
ylim(0, 1) +
geom_dotplot(binaxis='y', stackdir = 'center', dotsize = 0.7) +
stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
width = 0.75, size = 1, linetype = "solid", colour = "red") +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar",
width = (0.75/2), size = 0.5, linetype = "solid", colour = "red") +
labs(y = "Score", x = "Statistic", title = "Elastic Net") +
theme(text = element_text(size=12), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
plot.title = element_text(hjust = 0.5))
ggsave(p, width=5, height=4, units="in")
ggsave(g, width=5, height=4, units="in")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renv.R
\name{renv_new}
\alias{renv_new}
\title{Create a minimal R environment for a post}
\usage{
renv_new(name, collection = "posts")
}
\arguments{
\item{name}{The folder in which the article is located}
\item{collection}{The collection the article belongs to (default = "posts")}
}
\value{
??
}
\description{
Create a minimal R environment for a post
}
\details{
Creates the renv library and installs renv, distill and refinery
}
| /man/renv_new.Rd | permissive | wzbillings/refinery | R | false | true | 511 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renv.R
\name{renv_new}
\alias{renv_new}
\title{Create a minimal R environment for a post}
\usage{
renv_new(name, collection = "posts")
}
\arguments{
\item{name}{The folder in which the article is located}
\item{collection}{The collection the article belongs to (default = "posts")}
}
\value{
??
}
\description{
Create a minimal R environment for a post
}
\details{
Creates the renv library and installs renv, distill and refinery
}
|
# Build the plotting frame from the survey data `d` (defined upstream):
# keep the F* and PreT* covariates, label the compromise indicator,
# z-score the listed covariates, reshape to long, and attach readable
# display names.
# NOTE(review): F9 (Education) is selected but absent from the
# standardisation list — confirm that is intentional.
df <- d %>%
  select(name, compromise, F2, F4, F7, F8, F9, PreT2:PreT8,issue) %>%
  mutate(compromise = if_else(compromise==1, "Compromise", "No Compromise")) %>%
  # z-standardise (mean 0, sd 1) each listed covariate
  mutate_at(c("F2", "F4", "F7", "F8", "PreT2", "PreT3", "PreT4", "PreT5", "PreT6",
              "PreT7_1", "PreT7_2", "PreT7_3", "PreT7_4", "PreT7_5", "PreT7_6",
              "PreT7_7", "PreT8"), ~(scale(.) %>% as.vector)) %>%
  # long format: one row per respondent x covariate
  pivot_longer(cols = F2:PreT8,
               names_to = "variables") %>%
  # human-readable labels for plotting
  mutate(variables = recode(variables,
                            `F2` = "Age",
                            `F4` = "Urbaness",
                            `F7` = "Employment",
                            `F8` = "Income",
                            `F9` = "Education",
                            `PreT2` = "Position: Immigration",
                            `PreT3` = "Position: Defense",
                            `PreT4` = "Position: Education",
                            `PreT5` = "Attitude: Women in Politics",
                            `PreT6` = "Ideology",
                            `PreT7_1` = "Political Efficacy (1)",
                            `PreT7_2` = "Political Efficacy (2)",
                            `PreT7_3` = "Political Efficacy (3)",
                            `PreT7_4` = "Political Efficacy (4)",
                            `PreT7_5` = "Political Efficacy (5)",
                            `PreT7_6` = "Political Efficacy (6)",
                            `PreT7_7` = "Political Efficacy (7)",
                            `PreT8` = "Position: COVID-19"))
p1 <- df %>% filter(issue == "COVID") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "COVID") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p2 <- df %>% filter(issue == "Defense") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Defense") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p3 <- df %>% filter(issue == "Education") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Education") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p4 <- df %>% filter(issue == "Immigration") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Immigration") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
| /src/analysis/distribution_unbalanced_vars.R | permissive | MarikenvdVelden/bend-break-compromise | R | false | false | 3,757 | r | df <- d %>%
select(name, compromise, F2, F4, F7, F8, F9, PreT2:PreT8,issue) %>%
mutate(compromise = if_else(compromise==1, "Compromise", "No Compromise")) %>%
mutate_at(c("F2", "F4", "F7", "F8", "PreT2", "PreT3", "PreT4", "PreT5", "PreT6",
"PreT7_1", "PreT7_2", "PreT7_3", "PreT7_4", "PreT7_5", "PreT7_6",
"PreT7_7", "PreT8"), ~(scale(.) %>% as.vector)) %>%
pivot_longer(cols = F2:PreT8,
names_to = "variables") %>%
mutate(variables = recode(variables,
`F2` = "Age",
`F4` = "Urbaness",
`F7` = "Employment",
`F8` = "Income",
`F9` = "Education",
`PreT2` = "Position: Immigration",
`PreT3` = "Position: Defense",
`PreT4` = "Position: Education",
`PreT5` = "Attitude: Women in Politics",
`PreT6` = "Ideology",
`PreT7_1` = "Political Efficacy (1)",
`PreT7_2` = "Political Efficacy (2)",
`PreT7_3` = "Political Efficacy (3)",
`PreT7_4` = "Political Efficacy (4)",
`PreT7_5` = "Political Efficacy (5)",
`PreT7_6` = "Political Efficacy (6)",
`PreT7_7` = "Political Efficacy (7)",
`PreT8` = "Position: COVID-19"))
p1 <- df %>% filter(issue == "COVID") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "COVID") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p2 <- df %>% filter(issue == "Defense") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Defense") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p3 <- df %>% filter(issue == "Education") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Education") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
p4 <- df %>% filter(issue == "Immigration") %>%
ggplot(aes(x=variables, y=value, fill=name, color=name)) +
geom_violin(width=2.1, size=0.2) +
facet_grid(name~compromise, scales = "free") +
scale_fill_manual(values = fig_cols) +
scale_color_manual(values = fig_cols) +
labs(x = "", y = "", title = "Immigration") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
legend.position="none",
legend.title = element_blank(),
axis.text=element_text(size=8)) +
coord_flip()
|
## plot1.R: histogram of household Global Active Power for 1-2 Feb 2007.
# Read only the header row to capture the column names.
dfhead <- read.table("household_power_consumption.txt",header=FALSE, sep=";",nrows=1,stringsAsFactors = FALSE)
# Skip to the first record dated 1/2/2007 and read 2878 rows of data.
# NOTE(review): grep(..., readLines(...)) scans the entire file just to
# locate the start line -- consider caching the line index if this is slow.
df <- read.table("household_power_consumption.txt",header=FALSE, sep=";",skip= grep("1/2/2007",readLines("household_power_consumption.txt")),nrows=2878)
# Apply the original column names to the data block read above.
colnames(df) <- unlist(dfhead)
##df2 <- df
##df2$Date <- as.Date(strptime(df2[,1],"%d/%m/%Y"))
##datetimestr <- paste(df2$Date,df2$Time,collapse = " ")
# Red histogram of Global Active Power (kilowatts), then copy the screen
# device to plot1.png (closed by the dev.off() that follows this block).
hist(df$Global_active_power,col = "red",xlab = "Global Active Power (kilowatts)",ylab="Frequency",main="Global Active Power")
dev.copy(png,"plot1.png")
dev.off() | /plot1.R | no_license | gaayatri/ExData_Plotting1 | R | false | false | 578 | r | dfhead <- read.table("household_power_consumption.txt",header=FALSE, sep=";",nrows=1,stringsAsFactors = FALSE)
df <- read.table("household_power_consumption.txt",header=FALSE, sep=";",skip= grep("1/2/2007",readLines("household_power_consumption.txt")),nrows=2878)
colnames(df) <- unlist(dfhead)
##df2 <- df
##df2$Date <- as.Date(strptime(df2[,1],"%d/%m/%Y"))
##datetimestr <- paste(df2$Date,df2$Time,collapse = " ")
hist(df$Global_active_power,col = "red",xlab = "Global Active Power (kilowatts)",ylab="Frequency",main="Global Active Power")
dev.copy(png,"plot1.png")
dev.off() |
## Matrix inversion is usually a costly computation and there may be
## some benefit to caching the inverse of a matrix rather than
## computing it repeatedly.
## This function creates a special "matrix" object that can cache its
## inverse: a list of four accessor functions closing over the matrix
## `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {   # default fixed: was numeric(), wrong type for a matrix container
    cache <- NULL   # cached inverse; NULL until cacheInverse() is called
    ## Replace the stored matrix and invalidate any cached inverse.
    setMatrix <- function(newValue) {
        x <<- newValue
        cache <<- NULL
    }
    ## Return the stored matrix.
    getMatrix <- function() {
        x
    }
    ## Store a computed inverse in the cache.
    cacheInverse <- function(solve) {
        cache <<- solve
    }
    ## Return the cached inverse, or NULL if none has been stored.
    getInverse <- function() {
        cache
    }
    list(setMatrix = setMatrix, getMatrix = getMatrix,
         cacheInverse = cacheInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix
## has not changed) then cacheSolve retrieves the inverse from
## the cache instead of recomputing it.
## Extra arguments in ... are forwarded to solve() (e.g. tol).
cacheSolve <- function(y, ...) {
    ## Fast path: return the cached inverse when one is available.
    inverse <- y$getInverse()
    if(!is.null(inverse)) {
        message("getting cached data")
        return(inverse)
    }
    ## Cache miss: compute, store, and return the inverse.
    data <- y$getMatrix()
    inverse <- solve(data, ...)   # fix: ... was accepted but never forwarded
    y$cacheInverse(inverse)
    inverse
}
## and there may be some benefit to caching the inverse of a matrix
## rather than compute it repeatedly
## This function creates a special "matrix" object that can
## cache its inverse
makeCacheMatrix <- function(x = numeric()) {
cache <- NULL
setMatrix <- function(newValue) {
x <<- newValue
cache <<- NULL
}
getMatrix <- function() {
x
}
cacheInverse <- function(solve) {
cache <<- solve
}
getInverse <- function() {
cache
}
list(setMatrix = setMatrix, getMatrix = getMatrix, cacheInverse = cacheInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above
## If the inverse has already been calculated (and the matrix
## has not changed) then the cachesolve should retrieve
## the inverse from the cache
cacheSolve <- function(y, ...) {
inverse <- y$getInverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- y$getMatrix()
inverse <- solve(data)
y$cacheInverse(inverse)
inverse
} |
### process_WEF.R:
### Do not run stand-alone - source from main data_prep.R for TourismRecreation.
###
### reformat and add rgn_ids to World Economic Forum (WEF) data
###
### Provenance:
### Jun2015 Casey O'Hara - updated for 2015, removed gapfilling, set up for .csv instead of .pdf
### Mar2014 JStewartLowndes; updated from 'clean_WEF.R' by JStewart in May 2013
### May2013 'clean_WEF.R' by JStewart
###
### Data:
### TTCI: Travel and Tourism competitiveness:
### * download .xlsx: http://www3.weforum.org/docs/TT15/WEF_TTCR_Dataset_2015.xlsx
### * note: only 2015 is represented here.
### * read report online: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### * table 1: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### index-results-the-travel-tourism-competitiveness-index-ranking-2015/
###
### GCI: Global Competitiveness (not used in 2015 for TR goal; see data_prep_GCI.R in globalprep/WEF-Economics)
### * download .xlsx: http://www3.weforum.org/docs/GCR2014-15/GCI_Dataset_2006-07-2014-15.xlsx
### * note: contains data for each year from 2006/2007 to 2014/2015
### * read report: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### * table 3 in this .pdf: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### wp-content/blogs.dir/54/mp/files/pages/files/tables3-7-wef-globalcompetitivenessreport-2014-15-2.pdf
###
### read in individual files
### call name_to_rgn() from ohicore
##############################################################################=
### setup ----
##############################################################################=
### Libraries and such are set up within data_prep.R
dir_wef <- file.path(dir_anx, '../WEF-Economics')
##############################################################################=
### WEF TTCI formatting ----
##############################################################################=
# read in files
ttci_raw <- read.csv(file.path(dir_wef, scenario, 'raw', 'WEF_TTCR_Dataset_2015.csv'),
skip = 3, check.names = FALSE, stringsAsFactors = FALSE)
### NOTE: check.names = FALSE because of Cote d'Ivoire has an accent circonflex over the 'o' (probably other issues in there too)
ttci <- ttci_raw[1, names(ttci_raw) != '']
### first row is index scores for 2015.
### After column 150, a bunch of unnamed columns that throw errors
ttci <- ttci %>%
select(-(1:2), -(4:9), year = Edition) %>%
gather(country, value, -year) %>%
mutate(score = as.numeric(value))
### Rescale all scores (out of 7) to range from 0 - 1.
# ttci <- ttci %>%
# mutate(score = score/7)
ttci_rgn <- name_to_rgn(ttci, fld_name='country',
flds_unique=c('country', 'year'), fld_value='score',
collapse_fxn = 'mean', add_rgn_name = T) %>%
arrange(rgn_id, year)
head(ttci_rgn, 10)
# rgn_id year score rgn_name
# 1 14 2015 4.35 Taiwan
# 2 15 2015 3.63 Philippines
# 3 16 2015 4.98 Australia
# 4 20 2015 4.37 South Korea
# 5 24 2015 3.24 Cambodia
# 6 25 2015 4.26 Thailand
# 7 31 2015 4.00 Seychelles
# 8 37 2015 3.90 Mauritius
# 9 40 2015 3.80 Sri Lanka
# 10 41 2015 2.81 Mozambique
### Save TTCI data file
ttci_file <- file.path(dir_git, scenario, 'intermediate/wef_ttci_2015.csv')
write_csv(ttci_rgn, ttci_file)
| /globalprep/tr/v2015/R/process_WEF.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 3,561 | r | ### process_WEF.R:
### Do not run stand-alone - source from main data_prep.R for TourismRecreation.
###
### reformat and add rgn_ids to World Economic Forum (WEF) data
###
### Provenance:
### Jun2015 Casey O'Hara - updated for 2015, removed gapfilling, set up for .csv instead of .pdf
### Mar2014 JStewartLowndes; updated from 'clean_WEF.R' by JStewart in May 2013
### May2013 'clean_WEF.R' by JStewart
###
### Data:
### TTCI: Travel and Tourism competitiveness:
### * download .xlsx: http://www3.weforum.org/docs/TT15/WEF_TTCR_Dataset_2015.xlsx
### * note: only 2015 is represented here.
### * read report online: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### * table 1: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### index-results-the-travel-tourism-competitiveness-index-ranking-2015/
###
### GCI: Global Competitiveness (not used in 2015 for TR goal; see data_prep_GCI.R in globalprep/WEF-Economics)
### * download .xlsx: http://www3.weforum.org/docs/GCR2014-15/GCI_Dataset_2006-07-2014-15.xlsx
### * note: contains data for each year from 2006/2007 to 2014/2015
### * read report: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### * table 3 in this .pdf: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### wp-content/blogs.dir/54/mp/files/pages/files/tables3-7-wef-globalcompetitivenessreport-2014-15-2.pdf
###
### read in individual files
### call name_to_rgn() from ohicore
##############################################################################=
### setup ----
##############################################################################=
### Libraries and such are set up within data_prep.R
dir_wef <- file.path(dir_anx, '../WEF-Economics')
##############################################################################=
### WEF TTCI formatting ----
##############################################################################=
# read in files
ttci_raw <- read.csv(file.path(dir_wef, scenario, 'raw', 'WEF_TTCR_Dataset_2015.csv'),
skip = 3, check.names = FALSE, stringsAsFactors = FALSE)
### NOTE: check.names = FALSE because of Cote d'Ivoire has an accent circonflex over the 'o' (probably other issues in there too)
ttci <- ttci_raw[1, names(ttci_raw) != '']
### first row is index scores for 2015.
### After column 150, a bunch of unnamed columns that throw errors
ttci <- ttci %>%
select(-(1:2), -(4:9), year = Edition) %>%
gather(country, value, -year) %>%
mutate(score = as.numeric(value))
### Rescale all scores (out of 7) to range from 0 - 1.
# ttci <- ttci %>%
# mutate(score = score/7)
ttci_rgn <- name_to_rgn(ttci, fld_name='country',
flds_unique=c('country', 'year'), fld_value='score',
collapse_fxn = 'mean', add_rgn_name = T) %>%
arrange(rgn_id, year)
head(ttci_rgn, 10)
# rgn_id year score rgn_name
# 1 14 2015 4.35 Taiwan
# 2 15 2015 3.63 Philippines
# 3 16 2015 4.98 Australia
# 4 20 2015 4.37 South Korea
# 5 24 2015 3.24 Cambodia
# 6 25 2015 4.26 Thailand
# 7 31 2015 4.00 Seychelles
# 8 37 2015 3.90 Mauritius
# 9 40 2015 3.80 Sri Lanka
# 10 41 2015 2.81 Mozambique
### Save TTCI data file
ttci_file <- file.path(dir_git, scenario, 'intermediate/wef_ttci_2015.csv')
write_csv(ttci_rgn, ttci_file)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClusterLhnData.R
\name{ClusterLhnData}
\alias{ClusterLhnData}
\title{A function for fitting Shahar's LHN data to the linear-nonlinear-poisson
model.}
\usage{
ClusterLhnData(Data, numClusters = 3, kalpha = 10, thalpha = 3/20,
sdv0 = 0.1, taua = 1, taul0 = 0.5, minIters = 0, numIters = 10000,
dt = 1e-05, seed = 0, initMode = "random", iclust = NULL,
verbose = TRUE, timer = "OFF", slopeRatioToStop = 100,
numSlopePoints = 20, checkToStopEvery = 100, keepHistory = NULL,
keepHistoryAt = NULL, maxPreFitIters = 1)
}
\arguments{
\item{Data}{FIXME - what should the input data look like?!}
\item{numClusters}{The number of clusters to use.}
\item{kalpha}{The shape parameter for the gamma prior on alpha.}
\item{thalpha}{The scale parameter for the gamma prior on alpha}
\item{sdv0}{The standard deviation of the gaussian prior on membrane potential offset.}
\item{taua}{The rate constant for the exponential prior on the drive
parameters}
\item{taul0}{The rate constant for the exponential prior on l0.}
\item{minIters}{The minimum number of iterations.}
\item{numIters}{The maximum number of iterations.}
\item{dt}{The time constant of the updates.}
\item{seed}{The random seed to use.}
\item{initMode}{The initialization mode for the clustering. Can be "random",
"kmeans", or "kmeans++".}
\item{iclust}{An initial clustering assignment, if any.}
\item{verbose}{If TRUE will print out the progress of the algorithm and other
diagnostic information.}
\item{timer}{If "ON" will time different blocks of the code.}
\item{slopeRatioToStop}{The ratio of the rate of change of the objective at
the end to the start above which to terminate.}
\item{numSlopePoints}{How many points to take to compute the slope of the
objective}
\item{checkToStopEvery}{How often to compute the stopping ratio.}
\item{keepHistory}{A list of strings containing the variables to track.}
\item{keepHistoryAt}{A list of iterations at which to record history. If NULL defaults to all.}
\item{maxPreFitIters}{The maximum number of iterations to pre fit the cell-specific parameters to the clusters. If set to 0 will not prefit the parameters.}
}
\value{
A list consisting of
\item{seed}{The random seed used.}
\item{a}{A 2 x S x K array containing the learned drive parameters}
\item{al}{A N x 1 vector containing the learned alpha values}
\item{v0}{A N x 1 vector containing the learned v0 parameters}
\item{l0}{A N x 1 vector containing the learned l0 parameters}
\item{qnk}{A N x K matrix of the cluster responsibilities for each data
point.}
\item{L}{A T x S x N x K array containing the final lambda values}
\item{Lclust}{A T x S x N array containing the final lambda value for the most likely cluster for each fit.}
\item{numIters}{The actual number of iterations that ran.}
\item{F}{A numIters x 1 array containing the objective function as function
of the number of iterations.}
\item{clust}{A N x 1 vector of cluster assignments.}
\item{pclust}{A N x 1 vector of the probabilities of the cluster chosen.}
\item{dclust}{A N x 1 vector of distances to its cluster center.}
\item{exitMode}{A string with the exit mode of the algorithm: "ITERS" if it
hit the maximum number of iterations, "SLOPE_RATIO" if it exited early due
to the slope ratio.}
\item{history}{A list containing the values of the tracked variables for the specified iterations.}
\item{misc}{A miscellaneous list to hold other variables, used mainly for debugging.}
}
\description{
In the following, T: the number of time bins, S: the number of odors, N: the
number of cells, K: the number of clusters.
}
| /man/ClusterLhnData.Rd | no_license | sfrechter/physplit.analysis | R | false | true | 3,709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClusterLhnData.R
\name{ClusterLhnData}
\alias{ClusterLhnData}
\title{A function for fitting Shahar's LHN data to the linear-nonlinear-poisson
model.}
\usage{
ClusterLhnData(Data, numClusters = 3, kalpha = 10, thalpha = 3/20,
sdv0 = 0.1, taua = 1, taul0 = 0.5, minIters = 0, numIters = 10000,
dt = 1e-05, seed = 0, initMode = "random", iclust = NULL,
verbose = TRUE, timer = "OFF", slopeRatioToStop = 100,
numSlopePoints = 20, checkToStopEvery = 100, keepHistory = NULL,
keepHistoryAt = NULL, maxPreFitIters = 1)
}
\arguments{
\item{Data}{FIXME - what should the input data look like?!}
\item{numClusters}{The number of clusters to use.}
\item{kalpha}{The shape parameter for the gamma prior on alpha.}
\item{thalpha}{The scale parameter for the gamma prior on alpha}
\item{sdv0}{The standard deviation of the gaussian prior on membrane potential offset.}
\item{taua}{The rate constant for the exponential prior on the drive
parameters}
\item{taul0}{The rate constant for the exponential prior on l0.}
\item{minIters}{The minumum number of iterations.}
\item{numIters}{The maximum number of iterations.}
\item{dt}{The time constant of the updates.}
\item{seed}{The random seed to use.}
\item{initMode}{The initialization mode for the clustering. Can be "random",
"kmeans", or "kmeans++".}
\item{iclust}{An initial clustering assignment, if any.}
\item{verbose}{If TRUE will print out the progress of the algorithm and other
diagonstic information.}
\item{timer}{If "ON" will time different blocks of the code.}
\item{slopeRatioToStop}{The ratio of the rate of change of the objective at
the end to the start above which to terminate.}
\item{numSlopePoints}{How many points to take to compute the slope of the
objective}
\item{checkToStopEvery}{How often to compute the stopping ratio.}
\item{keepHistory}{A least of strings containing the variables to track.}
\item{keepHistoryAt}{A list of iterations at which to record history. If NULL defaults to all.}
\item{maxPreFitIters}{The maximum number of iterations to pre fit the cell-specific parameters to the clusters. If set to 0 will not prefit the parameters.}
}
\value{
A list consisting of
\item{seed}{The random seed used.}
\item{a}{A 2 x S x K array containing the learned drive parameters}
\item{al}{A N x 1 vector containing the learned alpha values}
\item{v0}{A N x 1 vector containing the learned v0 parameters}
\item{l0}{A N x 1 vector containing the learned l0 parameters}
\item{qnk}{A N x K matrix of the cluster responsibilities for each data
point.}
\item{L}{A T x S x N x K array containing the final lambda values}
\item{Lclust}{A T x S x N array containing the final lambda value for the most likely cluster for each fit.}
\item{numIters}{The actual number of iterations that ran.}
\item{F}{A numIters x 1 array containing the objective function as function
of the number of iterations.}
\item{clust}{A N x 1 vector of cluster assignments.}
\item{pclust}{A N x 1 vector of the probabilities of the cluster chosen.}
\item{dclust}{A N x 1 vector of distances to its cluster center.}
\item{exitMode}{A string with the exit mode of the algorithm: "ITERS" if it
hit the maximum number of iterations, "SLOPE_RATIO" if it exited early due
to the slope ratio.}
\item{history}{A list containing the values of the tracked variables for the specified iterations.}
\item{misc}{A miscellaneous list to hold other variables, used mainly for debugging.}
}
\description{
In the following, T: the number of time bins, S: the number of odors, N: the
number of cells, K: the number of clusters.
}
|
# Shiny server: fits a linear model of a user-chosen mtcars response on a
# user-selected set of factor columns, then plots the normal sampling
# distributions implied by each fitted value.
server <- function(input, output, session) {
  # Column index of the selected response variable within mtcars.
  respCol <- reactive({ which(colnames(mtcars) %in% input$response) })

  # Offer every column except the response as a candidate factor.
  output$factors <- renderUI({
    selectInput('factor2',
                'Choose Factors',
                choices = colnames(mtcars)[-respCol()],
                multiple = TRUE)
  })

  output$output <- renderPlot({
    # par() change is local enough here: renderPlot uses a fresh device.
    par(oma = c(0, 0, 0, 0), mar = c(5, 4, 2, 2))
    data <- mtcars
    response <- data[, respCol()]

    # Intercept-only design when no factors have been chosen yet.
    # (Replaces the `if`(cond, a <- ..., a <- ...) call-form, which hid
    # side-effect assignments inside function arguments.)
    if (is.null(input$factor2)) {
      model.mat <- rep(1, length(response))
    } else {
      model.mat <- as.matrix(data[input$factor2])
    }

    lm.model <- lm(response ~ model.mat)
    lm.coeff <- lm.model$coeff
    lm.modmat <- model.matrix(lm.model)
    responses <- lm.modmat %*% lm.coeff

    # Plotting window: +/- 3 SD around the mean fitted value.
    # (Renamed from `range` to avoid shadowing base::range; the x grid is
    # computed once instead of three times.)
    rng <- mean(responses) + c(-3, 3) * sd(responses)
    xs <- seq(rng[1], rng[2], length.out = 100)

    # Residual mean square with n - 2 degrees of freedom.
    MSE <- sum(lm.model$residuals^2) / (length(lm.model$fitted.values) - 2)

    # One normal density per fitted value, all sharing sd = sqrt(MSE).
    models <- lapply(X = responses,
                     FUN = function(y) dnorm(xs, mean = y, sd = sqrt(MSE)))

    plot(x = xs, y = models[[1]], type = 'l')
    # Guard the 2:length() idiom so a single-row input cannot produce 2:1.
    if (length(models) > 1) {
      for (i in 2:length(models)) {
        lines(x = xs, y = models[[i]])
      }
    }
  })
}
} | /inst/apps/significant_factors/server.R | no_license | Auburngrads/teachingApps | R | false | false | 1,333 | r | server = function(input, output, session) {
respCol <- reactive({ which(colnames(mtcars)%in%input$response) })
output$factors <- renderUI({
selectInput('factor2',
'Choose Factors',
choices = colnames(mtcars)[-respCol()],
multiple = T)
})
output$output <- renderPlot({
par(oma = c(0,0,0,0), mar = c(5,4,2,2))
data = mtcars
response <- data[,respCol()]
`if`(is.null(input$factor2),
model.mat <- rep(1,length(response)),
model.mat <- as.matrix(data[input$factor2]))
lm.model <- lm(response ~ model.mat)
lm.coeff <- lm.model$coeff
lm.modmat <- model.matrix(lm.model)
responses <- lm.modmat%*%lm.coeff
range <- mean(responses) + c(-3,3) * sd(responses)
MSE <- sum(lm.model$residuals^2)/(length(lm.model$fitted.values)-2)
models <- lapply(X = responses,
FUN = function(y) dnorm(seq(range[1],range[2], length.out = 100),
mean = y,
sd = sqrt(MSE)))
plot(x = seq(range[1],range[2], length.out = 100),
y = models[[1]],
type = 'l')
lapply(X = 2:length(models),
FUN = function(x)lines(x = seq(range[1],range[2], length.out = 100),
y = models[[x]]))
})
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_bed.R
\name{read_bed}
\alias{read_bed}
\title{Read a BED file}
\usage{
read_bed(file, col_names = def_names("bed"), col_types = def_types("bed"), ...)
}
\arguments{
\item{file}{Either a path to a file, a connection, or literal data
(either a single string or a raw vector).
Files ending in \code{.gz}, \code{.bz2}, \code{.xz}, or \code{.zip} will
be automatically uncompressed. Files starting with \verb{http://},
\verb{https://}, \verb{ftp://}, or \verb{ftps://} will be automatically
downloaded. Remote gz files can also be automatically downloaded and
decompressed.
Literal data is most useful for examples and tests. It must contain at
least one new line to be recognised as data (instead of a path) or be a
vector of greater than length 1.
Using a value of \code{\link[readr:clipboard]{clipboard()}} will read from the system clipboard.}
\item{col_names}{column names to use. Defaults to \link{def_names("bed")}
compatible with blast tabular output (\verb{--outfmt 6/7} in blast++ and \code{-m8}
in blast-legacy). \link{def_names("bed")} can easily be combined with extra
columns: \code{col_names = c(def_names("bed"), "more", "things")}.}
\item{col_types}{One of \code{NULL}, a \code{\link[readr:cols]{cols()}} specification, or
a string. See \code{vignette("readr")} for more details.
If \code{NULL}, all column types will be imputed from the first 1000 rows
on the input. This is convenient (and fast), but not robust. If the
imputation fails, you'll need to supply the correct types yourself.
If a column specification created by \code{\link[readr:cols]{cols()}}, it must contain
one column specification for each column. If you only want to read a
subset of the columns, use \code{\link[readr:cols]{cols_only()}}.
Alternatively, you can use a compact string representation where each
character represents one column:
\itemize{
\item c = character
\item i = integer
\item n = number
\item d = double
\item l = logical
\item f = factor
\item D = date
\item T = date time
\item t = time
\item ? = guess
\item _ or - = skip
By default, reading a file without a column specification will print a
message showing what \code{readr} guessed they were. To remove this message,
use \code{col_types = cols()}.
}}
}
\value{
A tibble with one row per BED record (NOTE(review): confirm the exact return class).
}
\description{
Read a BED file
}
| /man/read_bed.Rd | permissive | quanrd/gggenomes | R | false | true | 2,349 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_bed.R
\name{read_bed}
\alias{read_bed}
\title{Read a BED file}
\usage{
read_bed(file, col_names = def_names("bed"), col_types = def_types("bed"), ...)
}
\arguments{
\item{file}{Either a path to a file, a connection, or literal data
(either a single string or a raw vector).
Files ending in \code{.gz}, \code{.bz2}, \code{.xz}, or \code{.zip} will
be automatically uncompressed. Files starting with \verb{http://},
\verb{https://}, \verb{ftp://}, or \verb{ftps://} will be automatically
downloaded. Remote gz files can also be automatically downloaded and
decompressed.
Literal data is most useful for examples and tests. It must contain at
least one new line to be recognised as data (instead of a path) or be a
vector of greater than length 1.
Using a value of \code{\link[readr:clipboard]{clipboard()}} will read from the system clipboard.}
\item{col_names}{column names to use. Defaults to \link{def_names("bed")}
compatible with blast tabular output (\verb{--outfmt 6/7} in blast++ and \code{-m8}
in blast-legacy). \link{def_names("bed")} can easily be combined with extra
columns: \code{col_names = c(def_names("bed"), "more", "things")}.}
\item{col_types}{One of \code{NULL}, a \code{\link[readr:cols]{cols()}} specification, or
a string. See \code{vignette("readr")} for more details.
If \code{NULL}, all column types will be imputed from the first 1000 rows
on the input. This is convenient (and fast), but not robust. If the
imputation fails, you'll need to supply the correct types yourself.
If a column specification created by \code{\link[readr:cols]{cols()}}, it must contain
one column specification for each column. If you only want to read a
subset of the columns, use \code{\link[readr:cols]{cols_only()}}.
Alternatively, you can use a compact string representation where each
character represents one column:
\itemize{
\item c = character
\item i = integer
\item n = number
\item d = double
\item l = logical
\item f = factor
\item D = date
\item T = date time
\item t = time
\item ? = guess
\item _ or - = skip
}
By default, reading a file without a column specification will print a
message showing what \code{readr} guessed they were. To remove this message,
use \code{col_types = cols()}.}
}
\value{
}
\description{
Read a BED file
}
|
# | -------------------------------------------------------------------------- |
# | Getting & Cleaning Data - Programming Assignment - John Christensen
# | -------------------------------------------------------------------------- |
# run_analysis.R -- tidies the UCI HAR (Samsung Galaxy S smartphone
# accelerometer) data set. Per the assignment, this script:
#   1. Merges the training and the test sets to create one data set.
#   2. Extracts only the measurements on the mean and standard deviation
#      for each measurement.
#   3. Uses descriptive activity names to name the activities in the data set.
#   4. Appropriately labels the data set with descriptive variable names.
#   5. From the data set in step 4, creates a second, independent tidy data
#      set with the average of each variable for each activity and each subject.
#
# Data description:
#   http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
# Data download:
#   https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# The zip is assumed to be extracted to ./UCI HAR Dataset/ under the working
# directory.
# | -------------------------------------------------------------------------- |
# | Prep
# | -------------------------------------------------------------------------- |
getwd()
# NOTE(review): machine-specific path -- adjust (or remove) before running
# anywhere else.
setwd("C:/Users/john.christensen/Box Sync/John Christensen/Data Science/Coursera Data Science Specialization/3) Getting & Cleaning Data/ProgrammingAssignment")
# Install only when missing, instead of unconditionally re-installing on
# every run.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
if (!requireNamespace("readr", quietly = TRUE)) install.packages("readr")
library(readr)
# | -------------------------------------------------------------------------- |
# | Preparing training data
# | -------------------------------------------------------------------------- |
# Measurements: whitespace-separated feature columns, no header row.
trainData <- read_table2(file = "./UCI HAR Dataset/train/X_train.txt", col_names = FALSE)
# Activity IDs: one single-digit code (1-6) per row.
trainActivityID <- read_fwf("./UCI HAR Dataset/train/y_train.txt", col_positions = fwf_widths(widths = 1, col_names = "activityID"))
# Subject IDs: one subject number per row.
trainSubjectID <- read_table2(file = "./UCI HAR Dataset/train/subject_train.txt", col_names = "subjectID")
# Lookup table mapping activity ID -> descriptive activity name.
refActivityLabels <- read_fwf("./UCI HAR Dataset/activity_labels.txt"
                              , col_positions = fwf_positions(start = c(1,3), end = c(1,NA), col_names = c("activityID", "activityDSC")))
# Lookup table of the feature (measurement column) names.
refFeatureLabels <- read_table2(file = "./UCI HAR Dataset/features.txt", col_names = c("featureID", "featureDSC"))
# Make feature labels syntactically valid variable names
# (no "(", ")", or "-" allowed in variable names).
refFeatureLabels$featureDSC <- make.names(names = refFeatureLabels$featureDSC)
# Label the measurement columns with the descriptive feature names.
names(trainData) <- refFeatureLabels$featureDSC
# Join descriptive activity names onto the observed activity IDs.
trainActivities <- inner_join(x = trainActivityID, y = refActivityLabels, by = c("activityID" = "activityID"))
# Bind activities & subject IDs to the measurements; keep only mean/std
# columns.  Column list is written out explicitly to match the test branch
# below (the original used the equivalent range subjectID:activityDSC here).
trainData <- trainSubjectID %>%
  bind_cols(trainActivities, trainData) %>%
  select(subjectID, activityID, activityDSC, contains("mean"), contains("std"))
# Drop intermediates that are no longer needed.
rm(list = c("trainActivities", "trainActivityID", "trainSubjectID"))
# | -------------------------------------------------------------------------- |
# | Preparing test data using same steps as above
# | -------------------------------------------------------------------------- |
testData <- read_table2(file = "./UCI HAR Dataset/test/X_test.txt", col_names = FALSE)
testActivityID <- read_fwf("./UCI HAR Dataset/test/y_test.txt", col_positions = fwf_widths(widths = 1, col_names = "activityID"))
testSubjectID <- read_table2(file = "./UCI HAR Dataset/test/subject_test.txt", col_names = "subjectID")
testActivities <- inner_join(x = testActivityID, y = refActivityLabels, by = c("activityID" = "activityID"))
names(testData) <- refFeatureLabels$featureDSC
testData <- testSubjectID %>%
  bind_cols(testActivities, testData) %>%
  select(subjectID, activityID, activityDSC, contains("mean"), contains("std"))
rm(list = c("testActivities", "testActivityID", "testSubjectID", "refActivityLabels", "refFeatureLabels"))
# | -------------------------------------------------------------------------- |
# | Combine train and test to make full dataset - "movementData"
# | -------------------------------------------------------------------------- |
movementData <- trainData %>%
  union_all(testData) %>%
  arrange(subjectID, activityID)
# | -------------------------------------------------------------------------- |
# | 2nd tidy dataset - means grouped by subject & activity
# | -------------------------------------------------------------------------- |
# Average of every measurement variable for each subject/activity pair
# (activityID is dropped since activityDSC already identifies the activity).
meanBySubjectActivity <- movementData %>%
  select(-activityID) %>%
  group_by(subjectID, activityDSC) %>%
  summarize_all(.funs = mean) %>%
  arrange(subjectID, activityDSC)
# | -------------------------------------------------------------------------- |
# | 2nd tidy dataset - exported to CSV
# | -------------------------------------------------------------------------- |
# `file` replaces the deprecated `path` argument (readr >= 1.4).
write_csv(x = meanBySubjectActivity, file = "./MovementVarMeansBySubject&Activity")
| /run_analysis.R | no_license | john-s-christensen/CourseraGetting-CleaningData | R | false | false | 7,459 | r | # | -------------------------------------------------------------------------- |
# | Getting & Cleaning Data - Programming Assignment - John Christensen
# | -------------------------------------------------------------------------- |
# run_analysis.R -- tidies the UCI HAR (Samsung Galaxy S smartphone
# accelerometer) data set. Per the assignment, this script:
#   1. Merges the training and the test sets to create one data set.
#   2. Extracts only the measurements on the mean and standard deviation
#      for each measurement.
#   3. Uses descriptive activity names to name the activities in the data set.
#   4. Appropriately labels the data set with descriptive variable names.
#   5. From the data set in step 4, creates a second, independent tidy data
#      set with the average of each variable for each activity and each subject.
#
# Data description:
#   http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
# Data download:
#   https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# The zip is assumed to be extracted to ./UCI HAR Dataset/ under the working
# directory.
# | -------------------------------------------------------------------------- |
# | Prep
# | -------------------------------------------------------------------------- |
getwd()
# NOTE(review): machine-specific path -- adjust (or remove) before running
# anywhere else.
setwd("C:/Users/john.christensen/Box Sync/John Christensen/Data Science/Coursera Data Science Specialization/3) Getting & Cleaning Data/ProgrammingAssignment")
# Install only when missing, instead of unconditionally re-installing on
# every run.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
if (!requireNamespace("readr", quietly = TRUE)) install.packages("readr")
library(readr)
# | -------------------------------------------------------------------------- |
# | Preparing training data
# | -------------------------------------------------------------------------- |
# Measurements: whitespace-separated feature columns, no header row.
trainData <- read_table2(file = "./UCI HAR Dataset/train/X_train.txt", col_names = FALSE)
# Activity IDs: one single-digit code (1-6) per row.
trainActivityID <- read_fwf("./UCI HAR Dataset/train/y_train.txt", col_positions = fwf_widths(widths = 1, col_names = "activityID"))
# Subject IDs: one subject number per row.
trainSubjectID <- read_table2(file = "./UCI HAR Dataset/train/subject_train.txt", col_names = "subjectID")
# Lookup table mapping activity ID -> descriptive activity name.
refActivityLabels <- read_fwf("./UCI HAR Dataset/activity_labels.txt"
                              , col_positions = fwf_positions(start = c(1,3), end = c(1,NA), col_names = c("activityID", "activityDSC")))
# Lookup table of the feature (measurement column) names.
refFeatureLabels <- read_table2(file = "./UCI HAR Dataset/features.txt", col_names = c("featureID", "featureDSC"))
# Make feature labels syntactically valid variable names
# (no "(", ")", or "-" allowed in variable names).
refFeatureLabels$featureDSC <- make.names(names = refFeatureLabels$featureDSC)
# Label the measurement columns with the descriptive feature names.
names(trainData) <- refFeatureLabels$featureDSC
# Join descriptive activity names onto the observed activity IDs.
trainActivities <- inner_join(x = trainActivityID, y = refActivityLabels, by = c("activityID" = "activityID"))
# Bind activities & subject IDs to the measurements; keep only mean/std
# columns.  Column list is written out explicitly to match the test branch
# below (the original used the equivalent range subjectID:activityDSC here).
trainData <- trainSubjectID %>%
  bind_cols(trainActivities, trainData) %>%
  select(subjectID, activityID, activityDSC, contains("mean"), contains("std"))
# Drop intermediates that are no longer needed.
rm(list = c("trainActivities", "trainActivityID", "trainSubjectID"))
# | -------------------------------------------------------------------------- |
# | Preparing test data using same steps as above
# | -------------------------------------------------------------------------- |
testData <- read_table2(file = "./UCI HAR Dataset/test/X_test.txt", col_names = FALSE)
testActivityID <- read_fwf("./UCI HAR Dataset/test/y_test.txt", col_positions = fwf_widths(widths = 1, col_names = "activityID"))
testSubjectID <- read_table2(file = "./UCI HAR Dataset/test/subject_test.txt", col_names = "subjectID")
testActivities <- inner_join(x = testActivityID, y = refActivityLabels, by = c("activityID" = "activityID"))
names(testData) <- refFeatureLabels$featureDSC
testData <- testSubjectID %>%
  bind_cols(testActivities, testData) %>%
  select(subjectID, activityID, activityDSC, contains("mean"), contains("std"))
rm(list = c("testActivities", "testActivityID", "testSubjectID", "refActivityLabels", "refFeatureLabels"))
# | -------------------------------------------------------------------------- |
# | Combine train and test to make full dataset - "movementData"
# | -------------------------------------------------------------------------- |
movementData <- trainData %>%
  union_all(testData) %>%
  arrange(subjectID, activityID)
# | -------------------------------------------------------------------------- |
# | 2nd tidy dataset - means grouped by subject & activity
# | -------------------------------------------------------------------------- |
# Average of every measurement variable for each subject/activity pair
# (activityID is dropped since activityDSC already identifies the activity).
meanBySubjectActivity <- movementData %>%
  select(-activityID) %>%
  group_by(subjectID, activityDSC) %>%
  summarize_all(.funs = mean) %>%
  arrange(subjectID, activityDSC)
# | -------------------------------------------------------------------------- |
# | 2nd tidy dataset - exported to CSV
# | -------------------------------------------------------------------------- |
# `file` replaces the deprecated `path` argument (readr >= 1.4).
write_csv(x = meanBySubjectActivity, file = "./MovementVarMeansBySubject&Activity")
|
# STAT149
# Shi (Stephen) Fang
# Variable selection for NCAA tournament game prediction: rank predictors by
# random-forest importance on 2015 regular-season games, then (below) use
# that ranking for forward variable selection with cross-validated logistic
# regression.
# NOTE(review): machine-specific path -- adjust before running elsewhere.
setwd('/Users/sfang/Dropbox/Courses/Spring 2015/AM207/Project')
# library() errors immediately if a package is missing; require() only warns.
library(gam)
library(rpart)
library(rpart.plot)
library(randomForest)
library(boot)
save <- TRUE  # save plots to PNG files?
data <- read.csv('Data/games_2015_tournament.csv', header = TRUE, as.is = TRUE)
data <- subset(data, game_group == 1)
data <- within(data, {
  # Fold the semi-home/semi-away indicators into home/away.
  location_Home <- location_Home + location_SemiHome
  location_Away <- location_Away + location_SemiAway
  win <- factor(win)
})
# Train on regular-season games; hold out tournament games for testing.
train <- subset(data, ncaa_tournament == 0)
n_train <- nrow(train); n_train
test <- subset(data, ncaa_tournament == 1)
n_test <- nrow(test); n_test
# Run random forest to rank predictor importance.
# Only first-order "diff_" terms (excludes higher-order terms like diff_X.2).
features <- grep('^diff_[a-zA-Z]+$', names(data), value = TRUE)
f <- formula(paste0('win~location_Home+location_Away+',
                    paste(features, collapse = '+')))
rf0 <- randomForest(f, data = train, ntree = 500, importance = TRUE)
# Plot importance (type = 1), ascending so the horizontal barplot reads
# smallest-to-largest bottom-to-top.
rf0_importance <- sort(importance(rf0, type = 1)[, 1], decreasing = FALSE)
if (save) {
  filename <- "Plots/RF Importance.png"
  png(filename, height = 200 * 1.44, width = 300 * 1.44, pointsize = 12)
}
par(mar = c(4, 9, 2, 2), oma = rep(0, 4), cex = 1.1)
barplot(rf0_importance, las = 1, horiz = TRUE,
        main = 'Random Forest Predictor Importance', xlab = 'Importance')
if (save) dev.off()
# # run decision tree on 2015 regular season
# tree0 = rpart(f, data=train, method="class", parms=list(split="gini"))
# tree0 = rpart(f, data=train, method="class", parms=list(split="gini"),
# control=rpart.control(minsplit=2,cp=0.001))
# prp(tree0,type=0,extra=106,digits=4)
# if (save) {
# filename = "Class Tree CP.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# # plotcp(tree0,ylim=c(0.4,0.8))
# plotcp(tree0)
# if (save) dev.off()
#
# # prune, print, and display tree
# tree1 = prune(tree0, cp=0.027)
# tree1
# if (save) {
# filename = "Class Tree Pruned.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# prp(tree1,type=0,extra=106,digits=4)
# if (save) dev.off()
#
# # fit gam
# gam0 = gam(win~s(diff_Pythag), data=train, family=binomial)
# summary(gam0)
#
# if (save) {
# filename = "GAM Smooths.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mfrow=c(3,1), mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# plot(gam0, resid=T, rug=F, se=T, pch=20, col="red")
# if (save) dev.off()
#
# # test smoothing effect
# glm0 = glm(win~diff_Pythag, data=train, family=binomial)
# anova(glm0, gam0, test='Chi')
#
# # add 2nd and 3rd order terms
# glm1 = glm(win~diff_Pythag+diff_Pythag.2, data=train, family=binomial)
# glm2 = glm(win~diff_Pythag+diff_Pythag.2+diff_Pythag.3,
# data=train, family=binomial)
# glm3 = glm(win~diff_Pythag+diff_Pythag.2+diff_Pythag.3+diff_Pythag.4,
# data=train, family=binomial)
# anova(glm0, glm1, test='Chi') # 2nd not significant
# anova(glm0, glm2, test='Chi') # 3rd significant relative to null
# anova(glm2, glm3, test='Chi') # 4th not significant relative to 3rd
#
# # add up to 3rd order
# pred.x = test[,-which(names(test)=='win')]
# pred.y = test[,'win']
# pred = predict(glm2, pred.x, type='response')
# pred = ifelse(pred > 0.5,1,0)
# accuracy = mean(pred==pred.y)
# Cost function for boot::cv.glm: classification accuracy of predicted
# probabilities `pred` against observed labels `y` at a 0.5 threshold.
# `y` may be a 0/1 numeric vector or a factor with levels "0"/"1"
# (`==` coerces both sides to character in the factor case).
accuracy <- function(y, pred) {
  mean(y == ifelse(pred > 0.5, 1, 0))
}
run <- TRUE  # re-run the (slow) cross-validation?
# Forward variable selection: for each i, fit a logistic regression on the
# i most important predictors (per the random-forest ranking) and score it
# by 10-fold cross-validated accuracy.
if (run) {
  rf0_importance <- sort(rf0_importance, decreasing = TRUE)
  scores <- rep(NA, length(rf0_importance))  # preallocate CV scores
  start <- Sys.time()
  for (i in seq_along(rf0_importance)) {
    print(paste0('i=', i))
    features <- names(rf0_importance)[1:i]
    f <- formula(paste0('win~', paste(features, collapse = '+')))
    glm.mod <- glm(f, data = train, family = binomial)
    # k-fold cross-validation; delta[1] is the raw CV estimate.
    scores[i] <- cv.glm(train, glm.mod, cost = accuracy, K = 10)$delta[1]
  }
  end <- Sys.time()
  print(runtime <- end - start)
}
# Plot CV score vs number of predictors used.
# NOTE(review): this errors if run == FALSE and `scores` was never created.
if (save) {
  filename <- "Plots/XVal Scores.png"
  png(filename, height = 200 * 1.44, width = 300 * 1.44, pointsize = 12)
}
par(mar = c(4, 4, 2, 2), oma = rep(0, 4), cex = 1)
plot(scores, type = 'b', pch = 20, ylim = c(0.74, 0.78),
     main = '10-Fold Cross-Validation Results',
     ylab = 'Cross-Validation Score',
     xlab = 'Number of Most Important Predictors')
if (save) dev.off()
# # test set
# out.scores = rep(NA,length(rf0_importance))
# for (i in 1:length(rf0_importance)) {
#   features = names(rf0_importance)[1:i]
#   f = formula(paste0('win~', paste(features, collapse='+')))
#   glm.mod <- glm(f,data=train,family=binomial)
#   pred.x = test[,-which(names(test)=='win')]
#   pred.y = test[,'win']
#   pred = predict(glm.mod, pred.x, type='response')
#   pred = ifelse(pred > 0.5,1,0)
#   accuracy = mean(pred==pred.y)
#   out.scores[i] = accuracy
# }
# plot(out.scores,type='b',pch=20,ylim=c(0.7,0.8))
# points(scores, type='b',lty=2,pch=1,col='red')
# Fit priors using the i = 5 most important predictors (CV accuracy
# 0.7611940) on all 2014 games, and export the coefficient table for
# later use as priors.
i <- 5
features <- names(rf0_importance)[1:i]
print(features)
data <- read.csv('Data/games.csv', header = TRUE, as.is = TRUE)
data <- subset(data, game_group == 1)
data <- within(data, {
  location_Home <- location_Home + location_SemiHome
  location_Away <- location_Away + location_SemiAway
  win <- factor(win)
})
f <- formula(paste0('win~', paste(features, collapse = '+')))
glm.prior <- glm(f, family = binomial, data = data, subset = year == 2014)
priors <- summary(glm.prior)$coefficients
write.csv(priors, file = 'Priors.csv')
# try MCMC
| /src/model/Variable Selection.R | no_license | micahlanier/bayesket-ball | R | false | false | 5,408 | r | # STAT149
# Shi (Stephen) Fang
# Variable selection for NCAA tournament game prediction: rank predictors by
# random-forest importance on 2015 regular-season games, then (below) use
# that ranking for forward variable selection with cross-validated logistic
# regression.
# NOTE(review): machine-specific path -- adjust before running elsewhere.
setwd('/Users/sfang/Dropbox/Courses/Spring 2015/AM207/Project')
# library() errors immediately if a package is missing; require() only warns.
library(gam)
library(rpart)
library(rpart.plot)
library(randomForest)
library(boot)
save <- TRUE  # save plots to PNG files?
data <- read.csv('Data/games_2015_tournament.csv', header = TRUE, as.is = TRUE)
data <- subset(data, game_group == 1)
data <- within(data, {
  # Fold the semi-home/semi-away indicators into home/away.
  location_Home <- location_Home + location_SemiHome
  location_Away <- location_Away + location_SemiAway
  win <- factor(win)
})
# Train on regular-season games; hold out tournament games for testing.
train <- subset(data, ncaa_tournament == 0)
n_train <- nrow(train); n_train
test <- subset(data, ncaa_tournament == 1)
n_test <- nrow(test); n_test
# Run random forest to rank predictor importance.
# Only first-order "diff_" terms (excludes higher-order terms like diff_X.2).
features <- grep('^diff_[a-zA-Z]+$', names(data), value = TRUE)
f <- formula(paste0('win~location_Home+location_Away+',
                    paste(features, collapse = '+')))
rf0 <- randomForest(f, data = train, ntree = 500, importance = TRUE)
# Plot importance (type = 1), ascending so the horizontal barplot reads
# smallest-to-largest bottom-to-top.
rf0_importance <- sort(importance(rf0, type = 1)[, 1], decreasing = FALSE)
if (save) {
  filename <- "Plots/RF Importance.png"
  png(filename, height = 200 * 1.44, width = 300 * 1.44, pointsize = 12)
}
par(mar = c(4, 9, 2, 2), oma = rep(0, 4), cex = 1.1)
barplot(rf0_importance, las = 1, horiz = TRUE,
        main = 'Random Forest Predictor Importance', xlab = 'Importance')
if (save) dev.off()
# # run decision tree on 2015 regular season
# tree0 = rpart(f, data=train, method="class", parms=list(split="gini"))
# tree0 = rpart(f, data=train, method="class", parms=list(split="gini"),
# control=rpart.control(minsplit=2,cp=0.001))
# prp(tree0,type=0,extra=106,digits=4)
# if (save) {
# filename = "Class Tree CP.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# # plotcp(tree0,ylim=c(0.4,0.8))
# plotcp(tree0)
# if (save) dev.off()
#
# # prune, print, and display tree
# tree1 = prune(tree0, cp=0.027)
# tree1
# if (save) {
# filename = "Class Tree Pruned.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# prp(tree1,type=0,extra=106,digits=4)
# if (save) dev.off()
#
# # fit gam
# gam0 = gam(win~s(diff_Pythag), data=train, family=binomial)
# summary(gam0)
#
# if (save) {
# filename = "GAM Smooths.png"
# png(filename,height=200,width=300,pointsize=10)
# }
# par(mfrow=c(3,1), mar=c(4, 4, 2, 2), oma=rep(0, 4), cex=1)
# plot(gam0, resid=T, rug=F, se=T, pch=20, col="red")
# if (save) dev.off()
#
# # test smoothing effect
# glm0 = glm(win~diff_Pythag, data=train, family=binomial)
# anova(glm0, gam0, test='Chi')
#
# # add 2nd and 3rd order terms
# glm1 = glm(win~diff_Pythag+diff_Pythag.2, data=train, family=binomial)
# glm2 = glm(win~diff_Pythag+diff_Pythag.2+diff_Pythag.3,
# data=train, family=binomial)
# glm3 = glm(win~diff_Pythag+diff_Pythag.2+diff_Pythag.3+diff_Pythag.4,
# data=train, family=binomial)
# anova(glm0, glm1, test='Chi') # 2nd not significant
# anova(glm0, glm2, test='Chi') # 3rd significant relative to null
# anova(glm2, glm3, test='Chi') # 4th not significant relative to 3rd
#
# # add up to 3rd order
# pred.x = test[,-which(names(test)=='win')]
# pred.y = test[,'win']
# pred = predict(glm2, pred.x, type='response')
# pred = ifelse(pred > 0.5,1,0)
# accuracy = mean(pred==pred.y)
# Cost function for boot::cv.glm: classification accuracy of predicted
# probabilities `pred` against observed labels `y` at a 0.5 threshold.
# `y` may be a 0/1 numeric vector or a factor with levels "0"/"1"
# (`==` coerces both sides to character in the factor case).
accuracy <- function(y, pred) {
  mean(y == ifelse(pred > 0.5, 1, 0))
}
run <- TRUE  # re-run the (slow) cross-validation?
# Forward variable selection: for each i, fit a logistic regression on the
# i most important predictors (per the random-forest ranking) and score it
# by 10-fold cross-validated accuracy.
if (run) {
  rf0_importance <- sort(rf0_importance, decreasing = TRUE)
  scores <- rep(NA, length(rf0_importance))  # preallocate CV scores
  start <- Sys.time()
  for (i in seq_along(rf0_importance)) {
    print(paste0('i=', i))
    features <- names(rf0_importance)[1:i]
    f <- formula(paste0('win~', paste(features, collapse = '+')))
    glm.mod <- glm(f, data = train, family = binomial)
    # k-fold cross-validation; delta[1] is the raw CV estimate.
    scores[i] <- cv.glm(train, glm.mod, cost = accuracy, K = 10)$delta[1]
  }
  end <- Sys.time()
  print(runtime <- end - start)
}
# Plot CV score vs number of predictors used.
# NOTE(review): this errors if run == FALSE and `scores` was never created.
if (save) {
  filename <- "Plots/XVal Scores.png"
  png(filename, height = 200 * 1.44, width = 300 * 1.44, pointsize = 12)
}
par(mar = c(4, 4, 2, 2), oma = rep(0, 4), cex = 1)
plot(scores, type = 'b', pch = 20, ylim = c(0.74, 0.78),
     main = '10-Fold Cross-Validation Results',
     ylab = 'Cross-Validation Score',
     xlab = 'Number of Most Important Predictors')
if (save) dev.off()
# # test set
# out.scores = rep(NA,length(rf0_importance))
# for (i in 1:length(rf0_importance)) {
#   features = names(rf0_importance)[1:i]
#   f = formula(paste0('win~', paste(features, collapse='+')))
#   glm.mod <- glm(f,data=train,family=binomial)
#   pred.x = test[,-which(names(test)=='win')]
#   pred.y = test[,'win']
#   pred = predict(glm.mod, pred.x, type='response')
#   pred = ifelse(pred > 0.5,1,0)
#   accuracy = mean(pred==pred.y)
#   out.scores[i] = accuracy
# }
# plot(out.scores,type='b',pch=20,ylim=c(0.7,0.8))
# points(scores, type='b',lty=2,pch=1,col='red')
# Fit priors using the i = 5 most important predictors (CV accuracy
# 0.7611940) on all 2014 games, and export the coefficient table for
# later use as priors.
i <- 5
features <- names(rf0_importance)[1:i]
print(features)
data <- read.csv('Data/games.csv', header = TRUE, as.is = TRUE)
data <- subset(data, game_group == 1)
data <- within(data, {
  location_Home <- location_Home + location_SemiHome
  location_Away <- location_Away + location_SemiAway
  win <- factor(win)
})
f <- formula(paste0('win~', paste(features, collapse = '+')))
glm.prior <- glm(f, family = binomial, data = data, subset = year == 2014)
priors <- summary(glm.prior)$coefficients
write.csv(priors, file = 'Priors.csv')
# try MCMC
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.R
\name{mapImage}
\alias{mapImage}
\title{Add an Image to a Map}
\usage{
mapImage(longitude, latitude, z, zlim, zclip = FALSE, breaks, col, colormap,
border = NA, lwd = par("lwd"), lty = par("lty"), missingColor = NA,
filledContour = FALSE, gridder = "binMean2D",
debug = getOption("oceDebug"))
}
\arguments{
\item{longitude}{vector of longitudes corresponding to \code{z} matrix.}
\item{latitude}{vector of latitudes corresponding to \code{z} matrix.}
\item{z}{matrix to be represented as an image.}
\item{zlim}{limit for z (colour).}
\item{zclip}{A logical value, \code{TRUE} indicating that out-of-range
\code{z} values should be painted with \code{missingColor} and \code{FALSE}
indicating that these values should be painted with the nearest
in-range colour. If \code{zlim} is given then its min and max set the
range. If \code{zlim} is not given but \code{breaks} is given, then
the min and max of \code{breaks} sets the range used for z. If neither
\code{zlim} nor \code{breaks} is given, clipping is not done, i.e. the
action is as if \code{zclip} were \code{FALSE}.}
\item{breaks}{The z values for breaks in the colour scheme. If this is of
length 1, the value indicates the desired number of breaks, which is
supplied to \code{\link{pretty}}, in determining clean break points.}
\item{col}{Either a vector of colours corresponding to the breaks, of length
1 plus the number of breaks, or a function specifying colours,
e.g. \code{\link{oce.colorsJet}} for a rainbow.}
\item{colormap}{optional colormap, as created by \code{\link{colormap}}.
If a \code{colormap} is provided, then its properties takes precedence
over \code{breaks}, \code{col}, \code{missingColor}, and \code{zclip}
specified to \code{mapImage}.}
\item{border}{Colour used for borders of patches (passed to
\code{\link{polygon}}); the default \code{NA} means no border.}
\item{lwd}{line width, used if borders are drawn.}
\item{lty}{line type, used if borders are drawn.}
\item{missingColor}{a colour to be used to indicate missing data, or
\code{NA} to skip the drawing of such regions (which will retain
whatever material has already been drawn at the regions).}
\item{filledContour}{either a logical value indicating whether to use
filled contours to plot the image, or a numerical value indicating the
resampling rate to be used in interpolating from lon-lat coordinates to
x-y coordinates. See \dQuote{Details} for how this interacts with
\code{gridder}.}
\item{gridder}{Name of gridding function used if \code{filledContour} is
\code{TRUE}. This can be either \code{"binMean2D"} to select
\code{\link{binMean2D}} or \code{"interp"} for
\code{\link[akima]{interp}}. If not provided, then a selection is made
automatically, with \code{\link{binMean2D}} being used if there are
more than 10,000 data points in the present graphical view. This
\code{"binMean2D"} method is much faster than \code{"interp"}.}
\item{debug}{A flag that turns on debugging. Set to 1 to get a
moderate amount of debugging information, or to 2 to get more.}
}
\description{
Plot an image on an existing map.
}
\details{
Adds an image to an existing map, by analogy to \code{\link{image}}.
The data are on a regular grid in lon-lat space, but not in the projected
x-y space. This means that \code{\link{image}} cannot be used. Instead,
there are two approaches, depending on the value of \code{filledContour}.
If \code{filledContour} is \code{FALSE}, the image ``pixels'' are drawn with
\code{\link{polygon}}, which can be prohibitively slow for fine grids.
However, if \code{filledContour} is \code{TRUE} or a numerical value, then the
``pixels'' are remapped into a regular grid and then displayed with
\code{\link{.filled.contour}}. The remapping starts by converting the
regular lon-lat grid to an irregular x-y grid using
\code{\link{lonlat2map}}. This irregular grid is then interpolated onto a
regular x-y grid with \code{\link{binMean2D}} or with
\code{\link[akima]{interp}} from the \code{akima} package, as determined by
the \code{gridder} argument. If \code{filledContour} is \code{TRUE}, the
dimensions of the regular x-y grid are the same as those of the original
lon-lat grid; otherwise, the number of rows and columns are multiplied by
the numerical value of \code{filledContour}, e.g. the value 2 means to make
the grid twice as fine.
Filling contours can produce aesthetically-pleasing results, but the method
involves interpolation, so the data are not represented exactly and
analysts are advised to compare the results from the two methods (and
perhaps various grid refinement values) to guard against misinterpretation.
If a \code{\link{png}} device is to be used, it is advised to supply
arguments \code{type="cairo"} and \code{antialias="none"}; see [1].
}
\examples{
\dontrun{
library(oce)
data(coastlineWorld)
data(topoWorld)
par(mfrow=c(2,1), mar=c(2, 2, 1, 1))
lonlim <- c(-70,-50)
latlim <- c(40,50)
topo <- decimate(topoWorld, by=2) # coarse to illustrate filled contours
topo <- subset(topo, latlim[1] < latitude & latitude < latlim[2])
topo <- subset(topo, lonlim[1] < longitude & longitude < lonlim[2])
mapPlot(coastlineWorld, type='l',
longitudelim=lonlim, latitudelim=latlim,
projection="+proj=lcc +lat_1=40 +lat_2=50 +lon_0=-60")
breaks <- seq(-5000, 1000, 500)
mapImage(topo, col=oce.colorsGebco, breaks=breaks)
mapLines(coastlineWorld)
box()
mapPlot(coastlineWorld, type='l',
longitudelim=lonlim, latitudelim=latlim,
projection="+proj=lcc +lat_1=40 +lat_2=50 +lon_0=-60")
mapImage(topo, filledContour=TRUE, col=oce.colorsGebco, breaks=breaks)
box()
mapLines(coastlineWorld)
## Northern polar region, with colour-coded bathymetry
par(mfrow=c(1,1))
drawPalette(c(-5000,0), zlim=c(-5000, 0), col=oce.colorsJet)
mapPlot(coastlineWorld, projection="+proj=stere +lat_0=90",
longitudelim=c(-180,180), latitudelim=c(60,120))
mapImage(topoWorld, zlim=c(-5000, 0), col=oce.colorsJet)
mapLines(coastlineWorld[['longitude']], coastlineWorld[['latitude']])
# Levitus SST
par(mfrow=c(1,1))
data(levitus, package='ocedata')
lon <- levitus$longitude
lat <- levitus$latitude
SST <- levitus$SST
par(mar=rep(1, 4))
Tlim <- c(-2, 30)
drawPalette(Tlim, col=oce.colorsJet)
mapPlot(coastlineWorld, projection="+proj=moll", grid=FALSE)
mapImage(lon, lat, SST, col=oce.colorsJet, zlim=Tlim)
mapPolygon(coastlineWorld, col='gray')
}
}
\author{
Dan Kelley
}
\references{
1. \url{http://codedocean.wordpress.com/2014/02/03/anti-aliasing-and-image-plots/}
}
\seealso{
A map must first have been created with \code{\link{mapPlot}}.
Other functions related to maps: \code{\link{lonlat2map}},
\code{\link{lonlat2utm}}, \code{\link{map2lonlat}},
\code{\link{mapArrows}}, \code{\link{mapAxis}},
\code{\link{mapContour}},
\code{\link{mapDirectionField}}, \code{\link{mapGrid}},
\code{\link{mapLines}}, \code{\link{mapLocator}},
\code{\link{mapLongitudeLatitudeXY}},
\code{\link{mapMeridians}}, \code{\link{mapPlot}},
\code{\link{mapPoints}}, \code{\link{mapPolygon}},
\code{\link{mapScalebar}}, \code{\link{mapText}},
\code{\link{mapTissot}}, \code{\link{mapZones}},
\code{\link{shiftLongitude}}, \code{\link{utm2lonlat}}
}
| /man/mapImage.Rd | no_license | AnneMTreasure/oce | R | false | true | 7,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.R
\name{mapImage}
\alias{mapImage}
\title{Add an Image to a Map}
\usage{
mapImage(longitude, latitude, z, zlim, zclip = FALSE, breaks, col, colormap,
border = NA, lwd = par("lwd"), lty = par("lty"), missingColor = NA,
filledContour = FALSE, gridder = "binMean2D",
debug = getOption("oceDebug"))
}
\arguments{
\item{longitude}{vector of longitudes corresponding to \code{z} matrix.}
\item{latitude}{vector of latitudes corresponding to \code{z} matrix.}
\item{z}{matrix to be represented as an image.}
\item{zlim}{limit for z (colour).}
\item{zclip}{A logical value, \code{TRUE} indicating that out-of-range
\code{z} values should be painted with \code{missingColor} and \code{FALSE}
indicating that these values should be painted with the nearest
in-range colour. If \code{zlim} is given then its min and max set the
range. If \code{zlim} is not given but \code{breaks} is given, then
the min and max of \code{breaks} sets the range used for z. If neither
\code{zlim} nor \code{breaks} is given, clipping is not done, i.e. the
action is as if \code{zclip} were \code{FALSE}.}
\item{breaks}{The z values for breaks in the colour scheme. If this is of
length 1, the value indicates the desired number of breaks, which is
supplied to \code{\link{pretty}}, in determining clean break points.}
\item{col}{Either a vector of colours corresponding to the breaks, of length
1 plus the number of breaks, or a function specifying colours,
e.g. \code{\link{oce.colorsJet}} for a rainbow.}
\item{colormap}{optional colormap, as created by \code{\link{colormap}}.
If a \code{colormap} is provided, then its properties takes precedence
over \code{breaks}, \code{col}, \code{missingColor}, and \code{zclip}
specified to \code{mapImage}.}
\item{border}{Colour used for borders of patches (passed to
\code{\link{polygon}}); the default \code{NA} means no border.}
\item{lwd}{line width, used if borders are drawn.}
\item{lty}{line type, used if borders are drawn.}
\item{missingColor}{a colour to be used to indicate missing data, or
\code{NA} to skip the drawing of such regions (which will retain
whatever material has already been drawn at the regions).}
\item{filledContour}{either a logical value indicating whether to use
filled contours to plot the image, or a numerical value indicating the
resampling rate to be used in interpolating from lon-lat coordinates to
x-y coordinates. See \dQuote{Details} for how this interacts with
\code{gridder}.}
\item{gridder}{Name of gridding function used if \code{filledContour} is
\code{TRUE}. This can be either \code{"binMean2D"} to select
\code{\link{binMean2D}} or \code{"interp"} for
\code{\link[akima]{interp}}. If not provided, then a selection is made
automatically, with \code{\link{binMean2D}} being used if there are
more than 10,000 data points in the present graphical view. This
\code{"binMean2D"} method is much faster than \code{"interp"}.}
\item{debug}{A flag that turns on debugging. Set to 1 to get a
moderate amount of debugging information, or to 2 to get more.}
}
\description{
Plot an image on an existing map.
}
\details{
Adds an image to an existing map, by analogy to \code{\link{image}}.
The data are on a regular grid in lon-lat space, but not in the projected
x-y space. This means that \code{\link{image}} cannot be used. Instead,
there are two approaches, depending on the value of \code{filledContour}.
If \code{filledContour} is \code{FALSE}, the image ``pixels'' are drawn with
\code{\link{polygon}}, which can be prohibitively slow for fine grids.
However, if \code{filledContour} is \code{TRUE} or a numerical value, then the
``pixels'' are remapped into a regular grid and then displayed with
\code{\link{.filled.contour}}. The remapping starts by converting the
regular lon-lat grid to an irregular x-y grid using
\code{\link{lonlat2map}}. This irregular grid is then interpolated onto a
regular x-y grid with \code{\link{binMean2D}} or with
\code{\link[akima]{interp}} from the \code{akima} package, as determined by
the \code{gridder} argument. If \code{filledContour} is \code{TRUE}, the
dimensions of the regular x-y grid are the same as those of the original
lon-lat grid; otherwise, the number of rows and columns are multiplied by
the numerical value of \code{filledContour}, e.g. the value 2 means to make
the grid twice as fine.
Filling contours can produce aesthetically-pleasing results, but the method
involves interpolation, so the data are not represented exactly and
analysts are advised to compare the results from the two methods (and
perhaps various grid refinement values) to guard against misinterpretation.
If a \code{\link{png}} device is to be used, it is advised to supply
arguments \code{type="cairo"} and \code{antialias="none"}; see [1].
}
\examples{
\dontrun{
library(oce)
data(coastlineWorld)
data(topoWorld)
par(mfrow=c(2,1), mar=c(2, 2, 1, 1))
lonlim <- c(-70,-50)
latlim <- c(40,50)
topo <- decimate(topoWorld, by=2) # coarse to illustrate filled contours
topo <- subset(topo, latlim[1] < latitude & latitude < latlim[2])
topo <- subset(topo, lonlim[1] < longitude & longitude < lonlim[2])
mapPlot(coastlineWorld, type='l',
longitudelim=lonlim, latitudelim=latlim,
projection="+proj=lcc +lat_1=40 +lat_2=50 +lon_0=-60")
breaks <- seq(-5000, 1000, 500)
mapImage(topo, col=oce.colorsGebco, breaks=breaks)
mapLines(coastlineWorld)
box()
mapPlot(coastlineWorld, type='l',
longitudelim=lonlim, latitudelim=latlim,
projection="+proj=lcc +lat_1=40 +lat_2=50 +lon_0=-60")
mapImage(topo, filledContour=TRUE, col=oce.colorsGebco, breaks=breaks)
box()
mapLines(coastlineWorld)
## Northern polar region, with colour-coded bathymetry
par(mfrow=c(1,1))
drawPalette(c(-5000,0), zlim=c(-5000, 0), col=oce.colorsJet)
mapPlot(coastlineWorld, projection="+proj=stere +lat_0=90",
longitudelim=c(-180,180), latitudelim=c(60,120))
mapImage(topoWorld, zlim=c(-5000, 0), col=oce.colorsJet)
mapLines(coastlineWorld[['longitude']], coastlineWorld[['latitude']])
# Levitus SST
par(mfrow=c(1,1))
data(levitus, package='ocedata')
lon <- levitus$longitude
lat <- levitus$latitude
SST <- levitus$SST
par(mar=rep(1, 4))
Tlim <- c(-2, 30)
drawPalette(Tlim, col=oce.colorsJet)
mapPlot(coastlineWorld, projection="+proj=moll", grid=FALSE)
mapImage(lon, lat, SST, col=oce.colorsJet, zlim=Tlim)
mapPolygon(coastlineWorld, col='gray')
}
}
\author{
Dan Kelley
}
\references{
1. \url{http://codedocean.wordpress.com/2014/02/03/anti-aliasing-and-image-plots/}
}
\seealso{
A map must first have been created with \code{\link{mapPlot}}.
Other functions related to maps: \code{\link{lonlat2map}},
\code{\link{lonlat2utm}}, \code{\link{map2lonlat}},
\code{\link{mapArrows}}, \code{\link{mapAxis}},
\code{\link{mapContour}},
\code{\link{mapDirectionField}}, \code{\link{mapGrid}},
\code{\link{mapLines}}, \code{\link{mapLocator}},
\code{\link{mapLongitudeLatitudeXY}},
\code{\link{mapMeridians}}, \code{\link{mapPlot}},
\code{\link{mapPoints}}, \code{\link{mapPolygon}},
\code{\link{mapScalebar}}, \code{\link{mapText}},
\code{\link{mapTissot}}, \code{\link{mapZones}},
\code{\link{shiftLongitude}}, \code{\link{utm2lonlat}}
}
|
# Coursera Exploratory Data Analysis
# Project_1
# plot_2
#
# Reads the UCI household power consumption data set, keeps the rows for
# 2007-02-01 and 2007-02-02, and writes a line plot of Global Active Power
# over time to plot2.png (480x480 px).
library("dplyr")
# NOTE(review): hard-coded absolute Windows path; the script only runs from
# this machine/layout.  Consider a relative path.
setwd("D:\\Exploratory_Data_Analysis\\Project_1")
# The full file is read; fields are ';'-separated and kept as character
# (stringsAsFactors=FALSE) so types can be converted explicitly below.
Data_00 <- read.table("D:\\Exploratory_Data_Analysis\\Project_1\\household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors=FALSE)
# Parse the Date column (stored as dd/mm/yyyy) so it can be filtered on.
Data_00$Date <- as.Date(Data_00$Date, format="%d/%m/%Y")
# Keep only the two target days, then stack them back together.
Data_01 <- filter(Data_00 , Date=="2007-02-01")
Data_02 <- filter(Data_00 , Date=="2007-02-02")
Data_03 <- rbind(Data_01, Data_02)
# Non-numeric readings (if any) become NA with a warning here.
Data_03$Global_active_power <- as.numeric(Data_03$Global_active_power)
# Combine Date and Time into POSIXlt timestamps for the x axis.
Date_Time <- strptime(paste(Data_03$Date, Data_03$Time), "%Y-%m-%d %H:%M:%S")
par(mfcol = c(1,1))
################# plot 2 #################
png( "plot2.png", width=480, height=480, units='px')
plot_2 <- plot(Date_Time, Data_03$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
invisible(dev.off())
| /plot2.R | no_license | chyb/Coursera-Exploratory-Data-Analysis | R | false | false | 872 | r |
# Coursera Exploratory Data Analysis
# Project_1
# plot_2
#
# Reads the UCI household power consumption data set, keeps the rows for
# 2007-02-01 and 2007-02-02, and writes a line plot of Global Active Power
# over time to plot2.png (480x480 px).
library("dplyr")
# NOTE(review): hard-coded absolute Windows path; the script only runs from
# this machine/layout.  Consider a relative path.
setwd("D:\\Exploratory_Data_Analysis\\Project_1")
# The full file is read; fields are ';'-separated and kept as character
# (stringsAsFactors=FALSE) so types can be converted explicitly below.
Data_00 <- read.table("D:\\Exploratory_Data_Analysis\\Project_1\\household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors=FALSE)
# Parse the Date column (stored as dd/mm/yyyy) so it can be filtered on.
Data_00$Date <- as.Date(Data_00$Date, format="%d/%m/%Y")
# Keep only the two target days, then stack them back together.
Data_01 <- filter(Data_00 , Date=="2007-02-01")
Data_02 <- filter(Data_00 , Date=="2007-02-02")
Data_03 <- rbind(Data_01, Data_02)
# Non-numeric readings (if any) become NA with a warning here.
Data_03$Global_active_power <- as.numeric(Data_03$Global_active_power)
# Combine Date and Time into POSIXlt timestamps for the x axis.
Date_Time <- strptime(paste(Data_03$Date, Data_03$Time), "%Y-%m-%d %H:%M:%S")
par(mfcol = c(1,1))
################# plot 2 #################
png( "plot2.png", width=480, height=480, units='px')
plot_2 <- plot(Date_Time, Data_03$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
invisible(dev.off())
|
# Example usage of pvsR::Rating.getSig (Project Vote Smart API wrapper).
# Every call that requires a live API key is kept inside "## Not run:"
# comments, so sourcing this file performs no network access.
library(pvsR)
### Name: Rating.getSig
### Title: Get detailed information about a special interest group
### Aliases: Rating.getSig
### ** Examples
# First, make sure your personal PVS API key is saved as character string in the pvs.key variable:
## Not run: pvs.key <- "yourkey"
# get information about certain special interest groups
## Not run: info <- Rating.getSig(list(1016,1120))
## Not run: info
| /data/genthat_extracted_code/pvsR/examples/Rating.getSig.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 411 | r | library(pvsR)
### Name: Rating.getSig
### Title: Get detailed information about a special interest group
### Aliases: Rating.getSig
### ** Examples
# First, make sure your personal PVS API key is saved as character string in the pvs.key variable:
## Not run: pvs.key <- "yourkey"
# get information about certain special interest groups
## Not run: info <- Rating.getSig(list(1016,1120))
## Not run: info
|
## This function creates a special "matrix" object that can cache its inverse
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(m) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }

  get <- function() {
    x
  }

  setInverse <- function(solveMatrix) {
    cached_inverse <<- solveMatrix
  }

  getInverse <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Compute the inverse of the special "matrix" created by makeCacheMatrix,
## caching the result so repeated calls avoid recomputation.
##
## x   : a cache-matrix list as returned by makeCacheMatrix()
## ... : further arguments forwarded to solve()
##
## Returns the inverse of the matrix stored in `x`.  On a cache hit a
## message is emitted and the cached value is returned unchanged.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if(!is.null(inv)){
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  ## Bug fix: '...' was accepted in the signature but never forwarded.
  inv <- solve(data, ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | numan-sharif/ProgrammingAssignment2 | R | false | false | 879 | r | ## This function creates a special "matrix" object that can cache its inverse
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(m) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }

  get <- function() {
    x
  }

  setInverse <- function(solveMatrix) {
    cached_inverse <<- solveMatrix
  }

  getInverse <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Compute the inverse of the special "matrix" created by makeCacheMatrix,
## caching the result so repeated calls avoid recomputation.
##
## x   : a cache-matrix list as returned by makeCacheMatrix()
## ... : further arguments forwarded to solve()
##
## Returns the inverse of the matrix stored in `x`.  On a cache hit a
## message is emitted and the cached value is returned unchanged.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if(!is.null(inv)){
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  ## Bug fix: '...' was accepted in the signature but never forwarded.
  inv <- solve(data, ...)
  x$setInverse(inv)
  inv
}
|
#' Stacked bar plot of adjusted proportions per area
#'
#' Draws a horizontal stacked bar chart of three outcome categories per
#' area, where the category proportions are standardised across three
#' sub-groups using the group weights taken from the last row of
#' \code{Antall}.
#'
#' @param Antall Data frame of counts.  Column 1 holds the area names;
#'   columns 2-10 hold the counts of the three stacked categories for each
#'   of three adjustment groups (group g occupies columns 1+g, 4+g and
#'   7+g); columns 11-13 hold counts with unknown category (reported in
#'   the "Ukjent" margin).
#' @param outfile File name (and thereby format) of the saved figure; an
#'   empty string means the plot is only shown on screen.
#' @param tittel Plot title.
#' @param sideTxt Label for the area axis.
#' @param terskel Minimum number of categorised registrations (columns
#'   2-10) an area must have; smaller areas are masked and labelled
#'   "N < terskel".
#' @param width,height Figure size in pixels.
#' @param tertiler Currently unused by the function body.
#'
#' @return A plot drawn as a side effect (on screen and, if \code{outfile}
#'   is non-empty, saved via \code{savePlot}).
#' @export
#'
indikatorFigAndelStabelGrVar_justert <- function(Antall, outfile='', tittel='Tittel', sideTxt='Boområde/opptaksområde', terskel=30,
                                      width=600, height=600, tertiler = c(-1,51,66,140))
{
  # Treat missing counts as zero.
  Antall[is.na(Antall)] <- 0
  # Group weights p are computed from the LAST row only
  # (dim(Antall[1])[1] == nrow(Antall)): one weight per adjustment group,
  # summed over that row's three category columns.
  # NOTE(review): presumably the last row is a national/total row -- confirm.
  p <- cbind(rowSums(Antall[dim(Antall[1])[1] , c(2, 5, 8)]),
             rowSums(Antall[dim(Antall[1])[1] , c(3, 6, 9)]),
             rowSums(Antall[dim(Antall[1])[1] , c(4, 7, 10)]))
  p <- p/sum(p)
  # Unadjusted category shares within each adjustment group (rows = areas).
  andeler_ujust_gr1 <- Antall[ , c(2, 5, 8)]/rowSums(Antall[ , c(2, 5, 8)])
  andeler_ujust_gr2 <- Antall[ , c(3, 6, 9)]/rowSums(Antall[ , c(3, 6, 9)])
  andeler_ujust_gr3 <- Antall[ , c(4, 7, 10)]/rowSums(Antall[ , c(4, 7, 10)])
  # Direct standardisation: weight each group's shares by p, then scale to %.
  andeler_justert <- andeler_ujust_gr1*p[1] + andeler_ujust_gr2*p[2] + andeler_ujust_gr3*p[3]
  andeler <- andeler_justert *100
  rownames(andeler) <- Antall[,1]
  # N counts only the categorised registrations; N_tot also includes the
  # unknown-category columns 11:13.
  N <- rowSums(Antall[, 2:10])
  N_tot <- rowSums(Antall[, 2:13])
  # Mask areas below the reporting threshold.
  andeler[which(N<terskel), ] <- NA
  # Sort by the third category, descending; masked (NA) areas placed first.
  rekkefolg <- order(andeler[,3], decreasing = T, na.last = F)
  # rekkefolg <- order(andeler[,3], decreasing = T)
  andeler <- andeler[rekkefolg, ]
  N <- N[rekkefolg]
  N_tot <- N_tot[rekkefolg]
  # radnavn <- paste0(rownames(andeler), ' (N=', N, ')')
  radnavn <- rownames(andeler)
  # Strip the last 8 characters from the column names used in the legend.
  names(andeler) <- substr(names(andeler),1,nchar(names(andeler))-8)
  # rapbase::figtype supplies the registry colour palette.
  FigTypUt <- rapbase::figtype(outfile='', width=width, height=height, pointsizePDF=11, fargepalett='BlaaOff')
  farger <- FigTypUt$farger
  # if (outfile == '') {windows(width = width, height = height)}
  # NOTE(review): windows() is Windows-only and opened unconditionally,
  # even when saving to file -- confirm this is intended.
  windows(width = width, height = height)
  # Save the par() settings modified below so they can be restored on exit.
  oldpar_mar <- par()$mar
  oldpar_fig <- par()$fig
  cexgr <- 1.3
  # Reserve a left figure margin wide enough for the longest area name.
  vmarg <- max(0, strwidth(radnavn, units='figure', cex=cexgr)*0.8)
  # hmarg <- max(0, 3*strwidth(max(N), units='figure', cex=cexgr)*0.7)
  par('fig'=c(vmarg, 1, 0, 1))
  par('mar'=c(5.1, 4.1, 4.1, 6.1))
  # Horizontal stacked bars; bar midpoints are captured in ypos.
  ypos <- barplot(t(as.matrix(andeler)), horiz=T, beside=FALSE, border=NA, main=tittel,
                  names.arg=rep('',dim(andeler)[1]), font.main=1, cex.main=1.3, xlab='Andel (%)',
                  las=1, col=farger[c(1,3,4)])
  ypos <- as.vector(ypos)
  # Area names on the left; N and share of unknowns in the right margin.
  mtext( radnavn, side=2, line=0.2, las=1, at=ypos, col=1, cex=cexgr)
  mtext( c(N, 'N'), side=4, line=2.5, las=1, at=c(ypos, max(ypos)+diff(ypos)[1]), col=1, cex=cexgr, adj = 1)
  mtext( c(paste0(round(rowSums(Antall[rekkefolg, 11:13])/N_tot*100, 0), ' %'), 'Ukjent'), side=4, line=6.0, las=1,
         at=c(ypos, max(ypos)+diff(ypos)[1]), col=1, cex=cexgr, adj = 1)
  mtext(text = sideTxt, side=2, line=10.5, las=0, col=1, cex=cexgr)
  # Percentage labels placed at the end of each stacked segment.
  text(x=andeler[,1], y=ypos, labels = paste0(round(andeler[,1]), '%'), cex=0.9, pos=2, col='white')
  text(x=(andeler[,1]+andeler[,2]), y=ypos, labels = paste0(round(andeler[,2]), ' %'), cex=0.9, pos=2)
  text(x=rep(100, length(andeler[3])), y=ypos, labels = paste0(round(andeler[,3]), ' %'), cex=0.9, pos=2)
  # Areas masked by the threshold are labelled "N < terskel" instead.
  if (length(which(is.na(andeler[,1]))) > 0){
    text(x=0, y=ypos[1:length(which(is.na(andeler[,1])))], labels = paste0('N < ', terskel), cex=0.9, pos=2)
  }
  par(xpd=TRUE)
  legend('top', inset=c(vmarg,-.03), names(andeler), fill = farger[c(1,3,4)], ncol = 3, border = farger[c(1,3,4)],
         bty = 'n', cex = 0.9)
  # Restore the graphics state changed above.
  par('mar'= oldpar_mar)
  par('fig'= oldpar_fig)
  par(xpd=FALSE)
  # if (outfile != '') {dev.off()}
  # The output type is inferred from the last three characters of outfile.
  if (outfile != '') {savePlot(outfile, type=substr(outfile, nchar(outfile)-2, nchar(outfile)))}
}
| /R/indikatorFigAndelStabelGrVar_justert.R | no_license | SKDE-Felles/indikatoR | R | false | false | 3,734 | r | #' Plot andeler/rater i stabelplot
#'
#' Denne funksjonen tar som input en dataramme med andeler av tre kategorier
#' som til sammen summerer til 100%
#'
#' @param plotdata En dataramme med rater/andeler i spesifisert form
#' @param outfile Angir filnavn og format på figuren som returneres,
#'
#' @return Et plot av rater over tre år
#' @export
#'
indikatorFigAndelStabelGrVar_justert <- function(Antall, outfile='', tittel='Tittel', sideTxt='Boområde/opptaksområde', terskel=30,
width=600, height=600, tertiler = c(-1,51,66,140))
{
Antall[is.na(Antall)] <- 0
p <- cbind(rowSums(Antall[dim(Antall[1])[1] , c(2, 5, 8)]),
rowSums(Antall[dim(Antall[1])[1] , c(3, 6, 9)]),
rowSums(Antall[dim(Antall[1])[1] , c(4, 7, 10)]))
p <- p/sum(p)
andeler_ujust_gr1 <- Antall[ , c(2, 5, 8)]/rowSums(Antall[ , c(2, 5, 8)])
andeler_ujust_gr2 <- Antall[ , c(3, 6, 9)]/rowSums(Antall[ , c(3, 6, 9)])
andeler_ujust_gr3 <- Antall[ , c(4, 7, 10)]/rowSums(Antall[ , c(4, 7, 10)])
andeler_justert <- andeler_ujust_gr1*p[1] + andeler_ujust_gr2*p[2] + andeler_ujust_gr3*p[3]
andeler <- andeler_justert *100
rownames(andeler) <- Antall[,1]
N <- rowSums(Antall[, 2:10])
N_tot <- rowSums(Antall[, 2:13])
andeler[which(N<terskel), ] <- NA
rekkefolg <- order(andeler[,3], decreasing = T, na.last = F)
# rekkefolg <- order(andeler[,3], decreasing = T)
andeler <- andeler[rekkefolg, ]
N <- N[rekkefolg]
N_tot <- N_tot[rekkefolg]
# radnavn <- paste0(rownames(andeler), ' (N=', N, ')')
radnavn <- rownames(andeler)
names(andeler) <- substr(names(andeler),1,nchar(names(andeler))-8)
FigTypUt <- rapbase::figtype(outfile='', width=width, height=height, pointsizePDF=11, fargepalett='BlaaOff')
farger <- FigTypUt$farger
# if (outfile == '') {windows(width = width, height = height)}
windows(width = width, height = height)
oldpar_mar <- par()$mar
oldpar_fig <- par()$fig
cexgr <- 1.3
vmarg <- max(0, strwidth(radnavn, units='figure', cex=cexgr)*0.8)
# hmarg <- max(0, 3*strwidth(max(N), units='figure', cex=cexgr)*0.7)
par('fig'=c(vmarg, 1, 0, 1))
par('mar'=c(5.1, 4.1, 4.1, 6.1))
ypos <- barplot(t(as.matrix(andeler)), horiz=T, beside=FALSE, border=NA, main=tittel,
names.arg=rep('',dim(andeler)[1]), font.main=1, cex.main=1.3, xlab='Andel (%)',
las=1, col=farger[c(1,3,4)])
ypos <- as.vector(ypos)
mtext( radnavn, side=2, line=0.2, las=1, at=ypos, col=1, cex=cexgr)
mtext( c(N, 'N'), side=4, line=2.5, las=1, at=c(ypos, max(ypos)+diff(ypos)[1]), col=1, cex=cexgr, adj = 1)
mtext( c(paste0(round(rowSums(Antall[rekkefolg, 11:13])/N_tot*100, 0), ' %'), 'Ukjent'), side=4, line=6.0, las=1,
at=c(ypos, max(ypos)+diff(ypos)[1]), col=1, cex=cexgr, adj = 1)
mtext(text = sideTxt, side=2, line=10.5, las=0, col=1, cex=cexgr)
text(x=andeler[,1], y=ypos, labels = paste0(round(andeler[,1]), '%'), cex=0.9, pos=2, col='white')
text(x=(andeler[,1]+andeler[,2]), y=ypos, labels = paste0(round(andeler[,2]), ' %'), cex=0.9, pos=2)
text(x=rep(100, length(andeler[3])), y=ypos, labels = paste0(round(andeler[,3]), ' %'), cex=0.9, pos=2)
if (length(which(is.na(andeler[,1]))) > 0){
text(x=0, y=ypos[1:length(which(is.na(andeler[,1])))], labels = paste0('N < ', terskel), cex=0.9, pos=2)
}
par(xpd=TRUE)
legend('top', inset=c(vmarg,-.03), names(andeler), fill = farger[c(1,3,4)], ncol = 3, border = farger[c(1,3,4)],
bty = 'n', cex = 0.9)
par('mar'= oldpar_mar)
par('fig'= oldpar_fig)
par(xpd=FALSE)
# if (outfile != '') {dev.off()}
if (outfile != '') {savePlot(outfile, type=substr(outfile, nchar(outfile)-2, nchar(outfile)))}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_scalar_in.R
\name{is.scalar_in}
\alias{is.scalar_in}
\alias{is.scalar_in01}
\title{Test if scalar is in interval}
\usage{
is.scalar_in(left, right)
is.scalar_in01(x)
}
\arguments{
\item{left, right}{lower and upper bound}
\item{x}{R object to be tested, most likely a numeric vector of length one
(other formats are allowed but will always return \code{FALSE}).}
}
\value{
\code{is.scalar_in01} returns \code{TRUE} if \code{x} is an atomic vector of
length one and \code{0 <= as_numeric(x) <= 1}.
\code{is.scalar_in} returns a function similar to \code{is.scalar_in01} but with
specified boundaries.
}
\description{
Test if scalar is in interval
}
\examples{
is.scalar_in01(.5) # TRUE
is.scalar_in01(5) # FALSE
is_scalar_in09 <- is.scalar_in(0,9)
is_scalar_in09(5) # TRUE
}
| /man/is.scalar_in.Rd | no_license | cran/incadata | R | false | true | 893 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_scalar_in.R
\name{is.scalar_in}
\alias{is.scalar_in}
\alias{is.scalar_in01}
\title{Test if scalar is in interval}
\usage{
is.scalar_in(left, right)
is.scalar_in01(x)
}
\arguments{
\item{left, right}{lower and upper bound}
\item{x}{R object to be tested, most likely a numeric vector of length one
(other formats are allowed but will always return \code{FALSE}).}
}
\value{
\code{is.scalar_in01} returns \code{TRUE} if \code{x} is an atomic vector of
length one and \code{0 <= as_numeric(x) <= 1}.
\code{is.scalar_in} returns a function similar to \code{is.scalar_in01} but with
specified boundaries.
}
\description{
Test if scalar is in interval
}
\examples{
is.scalar_in01(.5) # TRUE
is.scalar_in01(5) # FALSE
is_scalar_in09 <- is.scalar_in(0,9)
is_scalar_in09(5) # TRUE
}
|
rankhospital <- function(state, outcome, rank) {
  ## Return the name of the hospital in `state` holding the given `rank`
  ## (1 = lowest 30-day mortality) for `outcome`, with ties broken
  ## alphabetically by hospital name.
  ##
  ## state   : two-letter state abbreviation (case-insensitive)
  ## outcome : one of "heart attack", "heart failure", "pneumonia"
  ## rank    : a positive integer, or "best"/"worst" (case-insensitive);
  ##           out-of-range or unrecognised ranks yield NA
  state <- toupper(state)
  # Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Check that the state is valid (column 7 holds the state code)
  if(!(state %in% data[,7])) {
    stop("invalid state")
  }
  # Keep only the rows for the requested state
  statedata <- data[data[,7] == state,]
  # Column 2 is the hospital name; columns 11/17/23 hold the 30-day
  # mortality rates for the three supported outcomes.  Rows with a
  # missing rate are dropped.  Invalid outcomes stop the function.
  if(outcome == "heart attack") {
    statedata[,11] <- as.numeric(statedata[,11])
    outcomeData <- statedata[!is.na(statedata[,11]),c(2,11)]
  }
  else if (outcome == "heart failure") {
    statedata[,17] <- as.numeric(statedata[,17])
    outcomeData <- statedata[!is.na(statedata[,17]),c(2,17)]
  }
  else if (outcome == "pneumonia") {
    statedata[,23] <- as.numeric(statedata[,23])
    outcomeData <- statedata[!is.na(statedata[,23]),c(2,23)]
  }
  else {
    stop("invalid outcome")
  }
  # Convert "best"/"worst" to a numeric rank
  if (toupper(rank) == "BEST") {
    rank <- 1
  } else if (toupper(rank) == "WORST") {
    rank <- nrow(outcomeData)
  } else if (!is.numeric(rank)) {
    return(NA)
  }
  # An out-of-range rank yields NA
  if (nrow(outcomeData) < rank) {
    return(NA)
  }
  # Order by mortality rate, breaking ties alphabetically by hospital
  # name, then take the rank-th row.  (Bug fix: the previous code
  # selected ALL hospitals sharing the rank-th rate and returned the
  # alphabetically first one, which is wrong whenever the requested rank
  # falls inside a group of tied rates.)
  outcomeData[,2] <- as.numeric(outcomeData[,2])
  outcomeData <- outcomeData[order(outcomeData[,2], outcomeData[,1]),]
  outcomeData[rank, 1]
}
} | /rankhospital.R | no_license | roccobarbi/ProgrammingAssignment3 | R | false | false | 1,742 | r | rankhospital <- function(state, outcome, rank) {
# This function returns the best hospital (lower mortality rate)
# for a given outcome
state <- toupper(state)
# Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
# Check that the state is valid
if(!(state %in% data[,7])) {
stop("invalid state")
}
# Return hospital name in that state with lowest 30-day death rate
statedata <- data[data[,7] == state,]
# First I need to extract the data for a particular outcome.
# If the outome is invalid, I stop the execution of the function.
if(outcome == "heart attack") {
statedata[,11] <- as.numeric(statedata[,11])
outcomeData <- statedata[!is.na(statedata[,11]),c(2,11)]
}
else if (outcome == "heart failure") {
statedata[,17] <- as.numeric(statedata[,17])
outcomeData <- statedata[!is.na(statedata[,17]),c(2,17)]
}
else if (outcome == "pneumonia") {
statedata[,23] <- as.numeric(statedata[,23])
outcomeData <- statedata[!is.na(statedata[,23]),c(2,23)]
}
else {
stop("invalid outcome")
}
# Then I convert rank to a numeric value
if (toupper(rank) == "BEST") {
rank <- 1
} else if (toupper(rank) == "WORST") {
rank <- nrow(outcomeData)
} else if (!is.numeric(rank)) {
return(NA)
}
# Then I need to check if the rank is valid, otherwise I return a NULL
if (nrow(outcomeData) < rank) {
return(NA)
}
# In the end, I extract the data and return it
outcomeData[,2] <- as.numeric(outcomeData[,2])
outcomeData <- outcomeData[order(outcomeData[,2]),]
finalData <- outcomeData[outcomeData[,2] == outcomeData[rank,2],]
finalData <- finalData[order(finalData[,1]),]
finalData[1,1]
} |
# This function is for filtering the low-quality iSNVs by specified positions
library(tidyverse)
pos_head_tail <- c(1:100, (29903-99):29903) # positions 1:100 and (29903-99):29903 should be removed;
df_primer_new <- read_tsv("../../2021-10-11_nCoV_primers/results/primers_20211011.bed", col_names=F) # primers
df_primer_old <- read_tsv("../../2021-10-11_nCoV_primers/results/primers_old.bed", col_names=F)
# Filter an iSNV table by genomic position.
#
# df_input   : data frame/tibble containing at least the columns
#              pos, con_base, sec_base, sample and primer (primer ==
#              "new" selects the new primer BED intervals; any other
#              value falls back to the old set).
# pos_indels : optional positions of INDELs to drop (NA = skip step 1).
#
# Removes, in order: (1) INDEL positions, (2) the first/last 100 bases
# (pos_head_tail, defined at file level), (3) positions inside a primer
# binding region of the sample's primer set (df_primer_new/df_primer_old,
# read at file level), and (4) a fixed list of known primer/homoplasy
# sites.  Relies on the tidyverse being attached and on those globals.
filter_by_pos <- function(df_input, pos_indels=NA){
  stopifnot(all(c("pos", "con_base", "sec_base", "sample", "primer") %in% names(df_input)))
  # Ensure pos is numeric before the comparisons below.
  if(!is.numeric(df_input$pos[1])){df_input$pos <- as.numeric(df_input$pos)}
  # NOTE(review): is.na(pos_indels) has length > 1 when a vector of INDEL
  # positions is supplied, which errors in if() on R >= 4.2; consider
  # any(!is.na(pos_indels)).
  if(!is.na(pos_indels)){
    df_input <- df_input %>% filter(!pos %in% pos_indels) # 1. remove INDELs
  }
  df_input <- df_input %>% filter(!pos %in% pos_head_tail) # 2. remove head and tail 100 bases
  # 3. exclude all positions in the PCR primer binding regions
  df_input <- df_input %>% mutate(pos_combn=paste(pos, primer))
  pos_all <- unique(df_input$pos_combn)
  # For each unique "pos primer" pair, test whether the position lies in
  # any [X2, X3] interval of the matching primer BED table.
  check <- sapply(pos_all, function(x) {
    pos_x <- as.numeric(strsplit(x, " ")[[1]][1])
    primer_x <- strsplit(x, " ")[[1]][2]
    if(primer_x=="new"){
      any((df_primer_new$X2 <= pos_x) & (df_primer_new$X3 >= pos_x))
    } else {
      any((df_primer_old$X2 <= pos_x) & (df_primer_old$X3 >= pos_x))
    }
  })
  df_input <- df_input %>% filter(!pos_combn %in% pos_all[check])
  df_input <- df_input %>% filter(!pos %in% c(15494, 15489, 25381, 10194, 22422)) # 4. excluding primer/homoplasy sites, https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-022-28420-7/MediaObjects/41467_2022_28420_MOESM1_ESM.pdf
  return(df_input)
}
| /scripts/helper/isnv_position_filter.R | no_license | Leo-Poon-Lab/mutations-under-sarscov2-vaccination | R | false | false | 1,627 | r | # This function is for filtering the low-quality iSNVs by specified positions
library(tidyverse)
pos_head_tail <- c(1:100, (29903-99):29903) # positions 1:100 and (29903-99):29903 should be removed;
df_primer_new <- read_tsv("../../2021-10-11_nCoV_primers/results/primers_20211011.bed", col_names=F) # primers
df_primer_old <- read_tsv("../../2021-10-11_nCoV_primers/results/primers_old.bed", col_names=F)
# Filter an iSNV table by genomic position.
#
# df_input   : data frame/tibble containing at least the columns
#              pos, con_base, sec_base, sample and primer (primer ==
#              "new" selects the new primer BED intervals; any other
#              value falls back to the old set).
# pos_indels : optional positions of INDELs to drop (NA = skip step 1).
#
# Removes, in order: (1) INDEL positions, (2) the first/last 100 bases
# (pos_head_tail, defined at file level), (3) positions inside a primer
# binding region of the sample's primer set (df_primer_new/df_primer_old,
# read at file level), and (4) a fixed list of known primer/homoplasy
# sites.  Relies on the tidyverse being attached and on those globals.
filter_by_pos <- function(df_input, pos_indels=NA){
  stopifnot(all(c("pos", "con_base", "sec_base", "sample", "primer") %in% names(df_input)))
  # Ensure pos is numeric before the comparisons below.
  if(!is.numeric(df_input$pos[1])){df_input$pos <- as.numeric(df_input$pos)}
  # NOTE(review): is.na(pos_indels) has length > 1 when a vector of INDEL
  # positions is supplied, which errors in if() on R >= 4.2; consider
  # any(!is.na(pos_indels)).
  if(!is.na(pos_indels)){
    df_input <- df_input %>% filter(!pos %in% pos_indels) # 1. remove INDELs
  }
  df_input <- df_input %>% filter(!pos %in% pos_head_tail) # 2. remove head and tail 100 bases
  # 3. exclude all positions in the PCR primer binding regions
  df_input <- df_input %>% mutate(pos_combn=paste(pos, primer))
  pos_all <- unique(df_input$pos_combn)
  # For each unique "pos primer" pair, test whether the position lies in
  # any [X2, X3] interval of the matching primer BED table.
  check <- sapply(pos_all, function(x) {
    pos_x <- as.numeric(strsplit(x, " ")[[1]][1])
    primer_x <- strsplit(x, " ")[[1]][2]
    if(primer_x=="new"){
      any((df_primer_new$X2 <= pos_x) & (df_primer_new$X3 >= pos_x))
    } else {
      any((df_primer_old$X2 <= pos_x) & (df_primer_old$X3 >= pos_x))
    }
  })
  df_input <- df_input %>% filter(!pos_combn %in% pos_all[check])
  df_input <- df_input %>% filter(!pos %in% c(15494, 15489, 25381, 10194, 22422)) # 4. excluding primer/homoplasy sites, https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-022-28420-7/MediaObjects/41467_2022_28420_MOESM1_ESM.pdf
  return(df_input)
}
|
#' Find WWW Domains
#'
#' @description Handy function to return the \code{www} domain: the host
#' name, lower-cased, with any \code{http://}, \code{https://} or
#' \code{www.} prefix and any path component stripped.
#'
#' @param x A character vector from which the domain information is
#' desired.  The function is vectorized: a vector of URLs yields a vector
#' of domains of the same length.
#'
#' @keywords Manipulation
#'
#' @export
#' @examples
#' x1 <- "http://stackoverflow.com/questions/19020749/function-to-extract-domain-name-from-url-in-r"
#' x2 <- "http://www.talkstats.com/"
#' x3 <- "www.google.com"
#'
#' has.domain(x3)
#'
#' has.domain(c(x1, x2, x3))
`has.domain` <- function(x){
  x <- tolower(x)
  # Strip the protocol and "www." prefix, then keep everything before the
  # first "/".  vapply makes this element-wise; the previous
  # implementation ([[c(1, 1)]]) only returned the first element's domain
  # even though the documentation promised vector input.
  stripped <- gsub("http://|https://|www\\.", "", x)
  vapply(strsplit(stripped, "/"), function(parts) parts[[1]], character(1))
}
| /SciencesPo/R/has.domain.R | no_license | ingted/R-Examples | R | false | false | 577 | r | #' Find WWW Domains
#'
#' @description Hand function to return the \code{www} domain.
#'
#' @param x A vector from which the domain information is desired.
#'
#' @keywords Manipulation
#'
#' @export
#' @examples
#' x1 <- "http://stackoverflow.com/questions/19020749/function-to-extract-domain-name-from-url-in-r"
#' x2 <- "http://www.talkstats.com/"
#' x3 <- "www.google.com"
#'
#' has.domain(x3)
#'
#' sapply(list(x1, x2, x3), has.domain)
`has.domain` <- function(x){
  # Return the lower-cased host name with any http://, https:// or www.
  # prefix and any path component stripped.  vapply makes this
  # element-wise; the previous implementation ([[c(1, 1)]]) only returned
  # the first element's domain even though the roxygen promised vector
  # input.
  x <- tolower(x)
  stripped <- gsub("http://|https://|www\\.", "", x)
  vapply(strsplit(stripped, "/"), function(parts) parts[[1]], character(1))
}
|
# Single-replicate driver for a CKMRpop/spip sibling-pair simulation:
# configures a spip life history, runs one replicate, and (below) tallies
# half- and full-sib pairs within and across sampled cohorts.
# All console and message output is redirected to the snakemake log file.
log <- file(snakemake@log[[1]], open="wt")
sink(log, type = "output")
sink(log, type = "message")
library(tidyverse)
library(hexbin)
library(rdist)
##CKMRpop::install_spip(Dir = system.file(package = "CKMRpop"))
##remotes::install_github("eriqande/CKMRpop", build_vignettes = TRUE)
library(CKMRpop)
#vignette("species_1_simulation", package = "CKMRpop")
#### these are the values that get changed for different runs ####
cohort_size <- as.integer(snakemake@params$cohort_size)
SampleSize = as.integer(snakemake@params$SampleSize)
rep_num = as.integer(snakemake@params$rep_num)
# here are values for testing
# NOTE(review): these three assignments overwrite the snakemake parameters
# read just above on EVERY run -- they look like leftover debug code and
# should be removed or guarded before pipeline use.
cohort_size <- 2000
SampleSize <- 125
rep_num <- 1
my_seed = cohort_size * rep_num
#### Set some things for single runs ####
# We set NReps to 2 to make sure all the arrays get allocated and work
# the way they are supposed to. Then we only cycle over R from 1 to NReps - 1
# (i.e. 1 to 1).
NReps <- 2
SPD <- species_1_life_history
dummy = "2000CohortA"
dummy2 = paste(dummy,"sibs",sep="")
### define the scenario
survival = 0.7
alpha = 3 ## age at maturity
omega = 10 ## maximum age
adultlifespan = omega-alpha+1
phi = 1 ## ratio of Vk to kbar
##femalefecundity = c(0,0,alpha:omega) ## fecundity proportional to age
femalefecundity = c(0,0,rep(1,adultlifespan)) ## constant fecundity
##femalefecundity = c(rep(0,9),1) ## sweepstakes RS; only BOFFFs reproduce
samp_frac <- 2*SampleSize/cohort_size ## twice as large as target for subsampling
# NOTE(review): the trailing comment says 100 years but the value set is 56.
SPD$`number-of-years` <- 56 # run the sim forward for 100 years
samp_start_year <- 51
samp_stop_year <- 55
SPD[[4]] = c(0,0,1,1,1,1,1,1,1,1) ## prob of reproducing at each age
##SPD[[4]] = c(0,0,1,0,1,0,1,0,1,0)
##SPD[[4]] = c(0,0,rep(0.1,8))
SPD[[6]] = femalefecundity
# Clock-based seed; NOTE(review): the main loop below calls
# set.seed(my_seed) right before run_spip(), which supersedes this.
a=as.numeric(Sys.time())
set.seed(a)
##Scenario B
# Positional assignments fill the spip life-history slots (max age,
# survival curves, reproduction probabilities/fecundities for each sex,
# offspring-number distribution "negbin" with dispersion 1/phi, etc.) --
# presumably mirroring the species_1_life_history layout; confirm against
# the CKMRpop documentation.
SPD[[1]] = omega
SPD[[2]] = c(1,rep(survival,(SPD[[1]]-1)))
SPD[[3]] = SPD[[2]]
SPD[[5]] = SPD[[4]]
SPD[[7]] = SPD[[6]]
SPD[[8]] = "negbin"
SPD[[9]] = 1/phi
SPD[[10]] = SPD[[9]]
SPD[[11]] = -1
SPD[[12]] = 0.5
L <- leslie_from_spip(SPD, cohort_size)
# then we add those to the spip parameters
# NOTE(review): the FEMALE stable-age distribution is assigned to
# `initial-males` and vice versa -- harmless if the two distributions are
# identical, but the assignments look swapped; confirm.
SPD$`initial-males` <- floor(L$stable_age_distro_fem)
SPD$`initial-females` <- floor(L$stable_age_distro_male)
# tell spip to use the cohort size
SPD$`cohort-size` <- paste("const", cohort_size, collapse = " ")
SPD$`fixed-cohort-size` <- "" # define this flag and give it an empty string as an argument
# Build the pre-kill genotyping-proportion strings: sample samp_frac of one
# age class in years samp_start_year-samp_stop_year, zero for other ages --
# confirm the age-slot layout against the spip docs.
sfspace = paste("0 ")
range = paste(samp_start_year,"-",samp_stop_year,sep="")
SPD$`discard-all` <- 0
SPD$`gtyp-ppn-fem-pre` <- paste(range, "0 ", samp_frac, paste(rep(sfspace, SPD$'max-age' - 2), collapse = ""))
SPD$`gtyp-ppn-male-pre` <- SPD$`gtyp-ppn-fem-pre`
# eric reduces the memory over-allocation here
SPD$`alloc-extra` <- 2
# Birth years of the sampled cohorts (sample year minus 2).
Born = as.integer((samp_start_year:samp_stop_year)-2)
YearsSamp = length(Born)
# Accumulator arrays; one slice/row per replicate.
BigGapHalf = array(data=0, dim = c(YearsSamp,YearsSamp,NReps))
# NOTE(review): hard-coded five year labels assume YearsSamp == 5.
colnames(BigGapHalf) = c("Year1","Year2","Year3","Year4","Year5")
BigGapFull = BigGapHalf
BigCohorts = matrix(NA,NReps,YearsSamp)
colnames(BigCohorts) = Born
BigNb = matrix(NA,NReps,YearsSamp)
BigNbdads = BigNb
BigNbmoms = BigNb
# NOTE(review): BigSibsSame is allocated and its cohort column filled, but
# it is never updated or saved anywhere below.
BigSibsSame = array(data = 0, dim = c(YearsSamp,4,NReps))
dimnames(BigSibsSame)[[2]] = c("cohort","FSP","PHSP","MHSP")
BigSibsSame[,1,] = Born
#########
# Find all sibling pairs among the sampled offspring.
#
# Pedigree2: data frame with columns sampleID, YearBorn, Mom and Dad.
# Returns one row per sibling pair with columns ID1, Born1, ID2, Born2 and
# Parent: 'mom' = maternal half-sibs, 'dad' = paternal half-sibs,
# 'both' = full sibs.
GetSibs <- function(Pedigree2) {
  # remove duplicate rows in the pedigree
  Pedigree2 = Pedigree2[!duplicated(Pedigree2), ]
  # convert the parent IDs to unique integers (faster to compare)
  unique_moms = sort(unique(Pedigree2$Mom))
  unique_dads = sort(unique(Pedigree2$Dad))
  mom_range = 1:length(unique_moms)
  dad_range = 1:length(unique_dads)
  names(mom_range) = unique_moms
  names(dad_range) = unique_dads
  Pedigree2$Mom_id = mom_range[Pedigree2$Mom]
  Pedigree2$Dad_id = dad_range[Pedigree2$Dad]
  # n x n matrices (n = number of offspring): an entry is 0 exactly when the
  # two offspring share that parent.  base::outer replaces rdist::pdist here,
  # removing the only non-base dependency of this function.
  mom_matrix = outer(Pedigree2$Mom_id, Pedigree2$Mom_id, "-")
  dad_matrix = outer(Pedigree2$Dad_id, Pedigree2$Dad_id, "-")
  # extract the pairs sharing a parent, keep each pair once (i1 < i2), sort.
  # drop = FALSE keeps these as matrices even when only one pair matches
  # (the original dropped to a vector and then broke on order()).
  mom_matches = which(mom_matrix == 0, arr.ind = T)
  mom_matches = mom_matches[mom_matches[,1] < mom_matches[,2], , drop = FALSE]
  dad_matches = which(dad_matrix == 0, arr.ind = T)
  dad_matches = dad_matches[dad_matches[,1] < dad_matches[,2], , drop = FALSE]
  mom_matches = mom_matches[order(mom_matches[,1], mom_matches[,2]), , drop = FALSE]
  dad_matches = dad_matches[order(dad_matches[,1], dad_matches[,2]), , drop = FALSE]
  # i1, i2 are row numbers in Pedigree2
  mom_df = data.frame(mom_matches)
  names(mom_df) = c('i1', 'i2')
  mom_df$parent = 'mom'
  dad_df = data.frame(dad_matches)
  names(dad_df) = c('i1', 'i2')
  dad_df$parent = 'dad'
  # outer-merge the mom and dad matches: a pair present in both tables is a
  # full-sib pair; a pair present in only one shares just that parent.
  sibs = merge(mom_df, dad_df, by = c('i1', 'i2'), all = T)
  # After the merge, parent.x is non-NA when the pair shares a mother and
  # parent.y is non-NA when it shares a father.
  # BUG FIX: the original used is.na(parent.x) as "share_mom" (which really
  # selects dad-only pairs) and then reused it when building the paternal
  # table, so maternal half-sibs were dropped entirely and paternal
  # half-sibs were reported twice (once as 'mom', once as 'dad').
  share_both = !(is.na(sibs$parent.x) | is.na(sibs$parent.y))
  share_mom = is.na(sibs$parent.y)  # in mom_df only -> maternal half-sibs
  share_dad = is.na(sibs$parent.x)  # in dad_df only -> paternal half-sibs
  # construct the output file
  ms = cbind(Pedigree2[sibs[share_mom,]$i1,][, c('sampleID', 'YearBorn')],
             Pedigree2[sibs[share_mom,]$i2,][, c('sampleID', 'YearBorn')])
  names(ms) = c('ID1', 'Born1', 'ID2', 'Born2')
  ms$Parent = 'mom'
  ds = cbind(Pedigree2[sibs[share_dad,]$i1,][, c('sampleID', 'YearBorn')],
             Pedigree2[sibs[share_dad,]$i2,][, c('sampleID', 'YearBorn')])
  names(ds) = c('ID1', 'Born1', 'ID2', 'Born2')
  ds$Parent = 'dad'
  if (sum(share_both) > 0) {
    fs = cbind(Pedigree2[sibs[share_both,]$i1,][, c('sampleID', 'YearBorn')],
               Pedigree2[sibs[share_both,]$i2,][, c('sampleID', 'YearBorn')])
    names(fs) = c('ID1', 'Born1', 'ID2', 'Born2')
    fs$Parent = 'both'
    all_sibs = rbind(fs, ms, ds)
  } else {
    all_sibs = rbind(ms, ds)
  }
  return(all_sibs)
} # end function
######### start simulation
# Main replicate loop.  NReps is 2 but only NReps-1 (= 1) replicate runs;
# the accumulator arrays keep a second, unused slice.
for (R in 1:(NReps-1)) {
print(paste0("Replicate = ",R))
flush.console()
# Per-replicate pairwise sib-count matrices: diagonal = within-cohort
# pairs, upper triangle = across-cohort pairs.
BigSibsHalf = matrix(0,YearsSamp,YearsSamp)
BigSibsFull = BigSibsHalf
# NOTE(review): the seed is constant across replicates, so if NReps were
# raised, every replicate would reproduce the identical spip run.
set.seed(my_seed)
spip_dir <- run_spip(
pars = SPD
)
# now read that in and find relatives within the one-generation pedigree
slurped <- slurp_spip(spip_dir, 1)
# First, get the non-genotype info for each individual all together
non_geno_stuff <- slurped$samples %>%
mutate(YearSampled = map_int(samp_years_list, 1)) %>% # this gets the sample year out of the samp_years_list
select(ID, born_year, YearSampled, sex) %>% # pick out column in the order desired
left_join(slurped$pedigree %>% select(kid, ma, pa), by = c("ID" = "kid")) %>% # add mom and dad on there
rename(
sampleID = ID,
YearBorn = born_year,
Sex = sex,
Mom = ma,
Dad = pa
) # change the column names to what Robin wants
Pedigree = non_geno_stuff
## get cohort size each year
# NOTE(style): this reuses `a` (the clock seed from the setup section) as a
# scratch variable; harmless but easy to misread.
a = table(Pedigree$YearBorn)
BigCohorts[R,] = a
## get total Nb each year
Nbmoms = 1:YearsSamp
Nbdads = 1:YearsSamp
for (j in 1:YearsSamp) {
year = Born[j]
cohort = subset(Pedigree,Pedigree$YearBorn == year)
# Effective-number-of-parents-style estimator from the realized
# reproductive-success counts: (k-1)/(sum(RS^2)/k - 1) with k = cohort
# size; 99999 is a sentinel for "every offspring has a distinct parent".
RSmoms = table(cohort$Mom)
SSmoms = sum(RSmoms^2)
if(SSmoms > sum(RSmoms)) { Nbmoms[j] = (sum(RSmoms)-1)/(SSmoms/sum(RSmoms)-1) }
else {Nbmoms[j] = 99999}
RSdads = table(cohort$Dad)
SSdads = sum(RSdads^2)
if(SSdads > sum(RSdads)) {Nbdads[j] = (sum(RSdads)-1)/(SSdads/sum(RSdads)-1) }
else {Nbdads[j] = 99999}
}
BigNbmoms[R,] = Nbmoms
BigNbdads[R,] = Nbdads
# Combine the maternal and paternal components: 4*Nm*Nd/(Nm+Nd).
BigNb[R,] = 4*Nbmoms*Nbdads/(Nbmoms+Nbdads)
######### subsample cohorts of offspring
# Seed Pedigree2 with a throwaway first row so rbind() works, then draw
# SampleSize fish without replacement from each cohort (this requires every
# cohort to contain at least SampleSize sampled fish), then drop the seed row.
Pedigree2 = Pedigree[1,]
for (j in 1:YearsSamp) {
year = Born[j]
cohort = subset(Pedigree,Pedigree$YearBorn == year)
sampled = cohort[sample(nrow(cohort),SampleSize,replace=F),]
Pedigree2 = rbind(Pedigree2,sampled)
} # end for j
Pedigree2 = Pedigree2[-1,]
sibs = GetSibs(Pedigree2)
## get age gaps between sibs
# NOTE(review): Gap is computed but never used below.
Gap = abs(sibs$Born2 - sibs$Born1)
MHSP = subset(sibs,sibs$Parent == "mom")
PHSP = subset(sibs,sibs$Parent == "dad")
Halfs = rbind(MHSP,PHSP)
FSP = subset(sibs,sibs$Parent == "both")
##get within cohort sibs
withinhalf = subset(Halfs,Halfs$Born1 == Halfs$Born2)
for(j in 1:YearsSamp) {
bit = subset(withinhalf,withinhalf$Born1 == Born[j])
BigSibsHalf[j,j] = nrow(bit)
} # end for j
withinfull = subset(FSP,FSP$Born1 == FSP$Born2)
for(j in 1:YearsSamp) {
bit2 = subset(withinfull,withinfull$Born1 == Born[j])
BigSibsFull[j,j] = nrow(bit2)
}## end for j
##get across cohort sibs
acrosshalf = subset(Halfs,Halfs$Born1 != Halfs$Born2)
for(j in 1:(YearsSamp-1)) {
for(k in (j+1):YearsSamp) {
bit = subset(acrosshalf,acrosshalf$Born1 == Born[j] & acrosshalf$Born2 == Born[k])
BigSibsHalf[j,k] = nrow(bit)
}} # end for j,k
acrossfull = subset(FSP,FSP$Born1 != FSP$Born2)
for(j in 1:(YearsSamp-1)) {
for(k in (j+1):YearsSamp) {
bit = subset(acrossfull,acrossfull$Born1 == Born[j] & acrossfull$Born2 == Born[k])
BigSibsFull[j,k] = nrow(bit)
}} # end for j,k
BigGapHalf[,,R] = BigSibsHalf
BigGapFull[,,R] = BigSibsFull
} # end for R
# Now we just save the BigSibsHalf and BigSibsFull for summarizing later.
# (These are the last replicate's matrices, plus the raw spip slurp and the
# full pedigree.)
write_rds(
list(
bsh = BigSibsHalf,
bsf = BigSibsFull,
slurped = slurped,
Pedigree = Pedigree
), file = snakemake@output[[1]]
)
| /R/single-rep-full-output.R | no_license | eriqande/runnin_robins_ckmrpop_sims_on_sedna | R | false | false | 9,232 | r | log <- file(snakemake@log[[1]], open="wt")
sink(log, type = "output")
sink(log, type = "message")
library(tidyverse)
library(hexbin)
library(rdist)
##CKMRpop::install_spip(Dir = system.file(package = "CKMRpop"))
##remotes::install_github("eriqande/CKMRpop", build_vignettes = TRUE)
library(CKMRpop)
#vignette("species_1_simulation", package = "CKMRpop")
#### these are the values that get changed for different runs ####
cohort_size <- as.integer(snakemake@params$cohort_size)
SampleSize = as.integer(snakemake@params$SampleSize)
rep_num = as.integer(snakemake@params$rep_num)
# here are values for testing
cohort_size <- 2000
SampleSize <- 125
rep_num <- 1
my_seed = cohort_size * rep_num
#### Set some things for single runs ####
# We set NReps to 2 to make sure all the arrays get allocated and work
# the way they are supposed to. Then we only cycle over R from 1 to NReps - 1
# (i.e. 1 to 1).
NReps <- 2
SPD <- species_1_life_history
dummy = "2000CohortA"
dummy2 = paste(dummy,"sibs",sep="")
### define the scenario
survival = 0.7
alpha = 3 ## age at maturity
omega = 10 ## maximum age
adultlifespan = omega-alpha+1
phi = 1 ## ratio of Vk to kbar
##femalefecundity = c(0,0,alpha:omega) ## fecundity proportional to age
femalefecundity = c(0,0,rep(1,adultlifespan)) ## constant fecundity
##femalefecundity = c(rep(0,9),1) ## sweepstakes RS; only BOFFFs reproduce
samp_frac <- 2*SampleSize/cohort_size ## twice as large as target for subsampling
SPD$`number-of-years` <- 56 # run the sim forward for 100 years
samp_start_year <- 51
samp_stop_year <- 55
SPD[[4]] = c(0,0,1,1,1,1,1,1,1,1) ## prob of reproducing at each age
##SPD[[4]] = c(0,0,1,0,1,0,1,0,1,0)
##SPD[[4]] = c(0,0,rep(0.1,8))
SPD[[6]] = femalefecundity
a=as.numeric(Sys.time())
set.seed(a)
##Scenario B
SPD[[1]] = omega
SPD[[2]] = c(1,rep(survival,(SPD[[1]]-1)))
SPD[[3]] = SPD[[2]]
SPD[[5]] = SPD[[4]]
SPD[[7]] = SPD[[6]]
SPD[[8]] = "negbin"
SPD[[9]] = 1/phi
SPD[[10]] = SPD[[9]]
SPD[[11]] = -1
SPD[[12]] = 0.5
L <- leslie_from_spip(SPD, cohort_size)
# then we add those to the spip parameters
SPD$`initial-males` <- floor(L$stable_age_distro_fem)
SPD$`initial-females` <- floor(L$stable_age_distro_male)
# tell spip to use the cohort size
SPD$`cohort-size` <- paste("const", cohort_size, collapse = " ")
SPD$`fixed-cohort-size` <- "" # define this flag and give it an empty string as an argument
sfspace = paste("0 ")
range = paste(samp_start_year,"-",samp_stop_year,sep="")
SPD$`discard-all` <- 0
SPD$`gtyp-ppn-fem-pre` <- paste(range, "0 ", samp_frac, paste(rep(sfspace, SPD$'max-age' - 2), collapse = ""))
SPD$`gtyp-ppn-male-pre` <- SPD$`gtyp-ppn-fem-pre`
# eric reduces the memory over-allocation here
SPD$`alloc-extra` <- 2
Born = as.integer((samp_start_year:samp_stop_year)-2)
YearsSamp = length(Born)
BigGapHalf = array(data=0, dim = c(YearsSamp,YearsSamp,NReps))
colnames(BigGapHalf) = c("Year1","Year2","Year3","Year4","Year5")
BigGapFull = BigGapHalf
BigCohorts = matrix(NA,NReps,YearsSamp)
colnames(BigCohorts) = Born
BigNb = matrix(NA,NReps,YearsSamp)
BigNbdads = BigNb
BigNbmoms = BigNb
BigSibsSame = array(data = 0, dim = c(YearsSamp,4,NReps))
dimnames(BigSibsSame)[[2]] = c("cohort","FSP","PHSP","MHSP")
BigSibsSame[,1,] = Born
#########
# Find all sibling pairs in the (deduplicated) pedigree Pedigree2, which must
# have columns sampleID, YearBorn, Mom and Dad.  Returns one row per pair:
# ID1, Born1, ID2, Born2 and Parent ('mom', 'dad' or 'both').
GetSibs <- function(Pedigree2) {
# remove duplicate rows in the pedigree
Pedigree2 = Pedigree2[!duplicated(Pedigree2), ]
# convert the parent IDs to unique integers (faster to compare)
unique_moms = sort(unique(Pedigree2$Mom))
unique_dads = sort(unique(Pedigree2$Dad))
mom_range = 1:length(unique_moms)
dad_range = 1:length(unique_dads)
names(mom_range) = unique_moms
names(dad_range) = unique_dads
Pedigree2$Mom_id = mom_range[Pedigree2$Mom]
Pedigree2$Dad_id = dad_range[Pedigree2$Dad]
# make n x n matrix (n=number of offspring) values are zero if the pair of offspring shares a parent, positive otherwise
mom_matrix = pdist(Pedigree2$Mom_id)
dad_matrix = pdist(Pedigree2$Dad_id)
# extract the inds sharing parents, don't double count, and sort
# NOTE(review): if exactly ONE pair matches, the logical subsets below drop
# the match matrix to a plain vector and the later order() calls break;
# adding `, drop = FALSE` would make this edge case safe.
mom_matches = which(mom_matrix==0,arr.ind = T)
mom_matches = mom_matches[mom_matches[,1] < mom_matches[,2], ]
dad_matches = which(dad_matrix==0,arr.ind = T)
dad_matches = dad_matches[dad_matches[,1] < dad_matches[,2], ]
mom_matches = mom_matches[order(mom_matches[,1], mom_matches[,2]),]
dad_matches = dad_matches[order(dad_matches[,1], dad_matches[,2]),]
# convert to data.frame - i1, i2 gives the individual - row number in the original pedigree file
mom_df = data.frame(mom_matches)
names(mom_df) = c('i1', 'i2')
mom_df$parent = 'mom'
dad_df = data.frame(dad_matches)
names(dad_df) = c('i1', 'i2')
dad_df$parent = 'dad'
# merge the dfs of the mom and dad matches to find full sibs
sibs = merge(mom_df, dad_df, by = c('i1', 'i2'), all=T)
# arrays below are boolean indexes into the sibs df, telling us how the pair shares parents
# NOTE(review): after the merge, parent.x is non-NA exactly when the pair
# shares a MOTHER, so is.na(parent.x) selects pairs sharing only a father
# (and vice versa) -- the two masks below appear swapped.  In addition, the
# 'ds' table further down reuses share_mom, so dad-only pairs are reported
# twice (once as 'mom' and once as 'dad') while mom-only pairs are never
# reported.  Confirm intent before relying on these counts.
share_both = !(is.na(sibs$parent.x) | is.na(sibs$parent.y))
share_mom = is.na(sibs$parent.x)
share_dad = is.na(sibs$parent.y)
# we can count the number of pairs in each category
##sum(share_both)
##sum(share_mom)
##sum(share_dad)
# construct the output file
ms = cbind(Pedigree2[sibs[share_mom,]$i1,][, c('sampleID', 'YearBorn')],
Pedigree2[sibs[share_mom,]$i2,][, c('sampleID', 'YearBorn')])
names(ms) = c('ID1', 'Born1', 'ID2', 'Born2')
ms$Parent = 'mom'
ds = cbind(Pedigree2[sibs[share_mom,]$i1,][, c('sampleID', 'YearBorn')],
Pedigree2[sibs[share_mom,]$i2,][, c('sampleID', 'YearBorn')])
names(ds) = c('ID1', 'Born1', 'ID2', 'Born2')
ds$Parent = 'dad'
if (sum(share_both)>0) {
fs = cbind(Pedigree2[sibs[share_both,]$i1,][, c('sampleID', 'YearBorn')],
Pedigree2[sibs[share_both,]$i2,][, c('sampleID', 'YearBorn')])
names(fs) = c('ID1', 'Born1', 'ID2', 'Born2')
fs$Parent = 'both'
all_sibs = rbind(fs, ms, ds) }
else { all_sibs = rbind(ms, ds) }
return(all_sibs) } # end function
######### start simulation
for (R in 1:(NReps-1)) {
print(paste0("Replicate = ",R))
flush.console()
BigSibsHalf = matrix(0,YearsSamp,YearsSamp)
BigSibsFull = BigSibsHalf
set.seed(my_seed)
spip_dir <- run_spip(
pars = SPD
)
# now read that in and find relatives within the one-generation pedigree
slurped <- slurp_spip(spip_dir, 1)
# First, get the non-genotype info for each individual all together
non_geno_stuff <- slurped$samples %>%
mutate(YearSampled = map_int(samp_years_list, 1)) %>% # this gets the sample year out of the samp_years_list
select(ID, born_year, YearSampled, sex) %>% # pick out column in the order desired
left_join(slurped$pedigree %>% select(kid, ma, pa), by = c("ID" = "kid")) %>% # add mom and dad on there
rename(
sampleID = ID,
YearBorn = born_year,
Sex = sex,
Mom = ma,
Dad = pa
) # change the column names to what Robin wants
Pedigree = non_geno_stuff
## get cohort size each year
a = table(Pedigree$YearBorn)
BigCohorts[R,] = a
## get total Nb each year
Nbmoms = 1:YearsSamp
Nbdads = 1:YearsSamp
for (j in 1:YearsSamp) {
year = Born[j]
cohort = subset(Pedigree,Pedigree$YearBorn == year)
RSmoms = table(cohort$Mom)
SSmoms = sum(RSmoms^2)
if(SSmoms > sum(RSmoms)) { Nbmoms[j] = (sum(RSmoms)-1)/(SSmoms/sum(RSmoms)-1) }
else {Nbmoms[j] = 99999}
RSdads = table(cohort$Dad)
SSdads = sum(RSdads^2)
if(SSdads > sum(RSdads)) {Nbdads[j] = (sum(RSdads)-1)/(SSdads/sum(RSdads)-1) }
else {Nbdads[j] = 99999}
}
BigNbmoms[R,] = Nbmoms
BigNbdads[R,] = Nbdads
BigNb[R,] = 4*Nbmoms*Nbdads/(Nbmoms+Nbdads)
######### subsample cohorts of offspring
Pedigree2 = Pedigree[1,]
for (j in 1:YearsSamp) {
year = Born[j]
cohort = subset(Pedigree,Pedigree$YearBorn == year)
sampled = cohort[sample(nrow(cohort),SampleSize,replace=F),]
Pedigree2 = rbind(Pedigree2,sampled)
} # end for j
Pedigree2 = Pedigree2[-1,]
sibs = GetSibs(Pedigree2)
## get age gaps between sibs
Gap = abs(sibs$Born2 - sibs$Born1)
MHSP = subset(sibs,sibs$Parent == "mom")
PHSP = subset(sibs,sibs$Parent == "dad")
Halfs = rbind(MHSP,PHSP)
FSP = subset(sibs,sibs$Parent == "both")
##get within cohort sibs
withinhalf = subset(Halfs,Halfs$Born1 == Halfs$Born2)
for(j in 1:YearsSamp) {
bit = subset(withinhalf,withinhalf$Born1 == Born[j])
BigSibsHalf[j,j] = nrow(bit)
} # end for j
withinfull = subset(FSP,FSP$Born1 == FSP$Born2)
for(j in 1:YearsSamp) {
bit2 = subset(withinfull,withinfull$Born1 == Born[j])
BigSibsFull[j,j] = nrow(bit2)
}## end for j
##get arcoss cohort sibs
acrosshalf = subset(Halfs,Halfs$Born1 != Halfs$Born2)
for(j in 1:(YearsSamp-1)) {
for(k in (j+1):YearsSamp) {
bit = subset(acrosshalf,acrosshalf$Born1 == Born[j] & acrosshalf$Born2 == Born[k])
BigSibsHalf[j,k] = nrow(bit)
}} # end for j,k
acrossfull = subset(FSP,FSP$Born1 != FSP$Born2)
for(j in 1:(YearsSamp-1)) {
for(k in (j+1):YearsSamp) {
bit = subset(acrossfull,acrossfull$Born1 == Born[j] & acrossfull$Born2 == Born[k])
BigSibsFull[j,k] = nrow(bit)
}} # end for j,k
BigGapHalf[,,R] = BigSibsHalf
BigGapFull[,,R] = BigSibsFull
} # end for R
# Now we just save the BigSibsHalf and BigSibsFull for summarizing later.
write_rds(
list(
bsh = BigSibsHalf,
bsf = BigSibsFull,
slurped = slurped,
Pedigree = Pedigree
), file = snakemake@output[[1]]
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01-get_data.R
\name{get_data_of_brazil}
\alias{get_data_of_brazil}
\title{Downloading data from Ibama database - Multas arrecadadas}
\usage{
get_data_of_brazil(estados = "all", tipo_multa)
}
\value{
A tibble
}
\description{
Downloading data from Ibama database - Multas arrecadadas
}
\keyword{internal}
| /man/get_data_of_brazil.Rd | permissive | cccneto/Ibamam | R | false | true | 381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01-get_data.R
\name{get_data_of_brazil}
\alias{get_data_of_brazil}
\title{Downloading data from Ibama database - Multas arrecadadas}
\usage{
get_data_of_brazil(estados = "all", tipo_multa)
}
\value{
A tibble
}
\description{
Downloading data from Ibama database - Multas arrecadadas
}
\keyword{internal}
|
######## Fleming and Harrington #########
######## Ref. paper T. Hasegawa 2014 pharmaceutical statistics ##########
# Sample size for the Fleming-Harrington G(rho, gamma) weighted log-rank
# test under a delayed treatment effect (Hasegawa 2014, Pharm. Stat.).
# Time is discretized into sub-intervals of length 1/b; the treatment-arm
# hazard switches from lambda to lambda.trt after the delay eps, and
# censoring kicks in once t exceeds omega.
# Returns list(n, n_event, E.star, sum_D, D, den_vec, num_vec, time_vec).
sample.size_FH<-function(eps,p,b,tau,omega,lambda,lambda.trt,rho, gamma,alpha,beta){
  z_alpha <- qnorm(p = alpha, lower.tail = FALSE)
  z_beta <- qnorm(p = beta, lower.tail = FALSE)
  n_sub <- floor(b * tau)
  grid <- c(0, seq_len(n_sub) / b)
  # piecewise-constant hazards on the grid (index 1 corresponds to t = 0)
  h_ctl <- rep(lambda, n_sub + 1)
  h_trt <- c(rep(lambda, round(eps * b)), rep(lambda.trt, n_sub - round(eps * b) + 1))
  # at-risk proportions, depleted by events and, past omega, by censoring
  atrisk_ctl <- rep(1 - p, n_sub + 1)
  atrisk_trt <- rep(p, n_sub + 1)
  for (i in 1:(n_sub - 1)) {
    loss_cens <- (grid[i] > omega) / b / (tau - grid[i])
    atrisk_ctl[i + 1] <- atrisk_ctl[i] * (1 - h_ctl[i] / b - loss_cens)
    atrisk_trt[i + 1] <- atrisk_trt[i] * (1 - h_trt[i] / b - loss_cens)
  }
  atrisk_ctl[n_sub + 1] <- atrisk_trt[n_sub + 1] <- 0
  # pooled survival for the Fleming-Harrington weight S^rho * (1 - S)^gamma
  surv_ctl <- exp(-lambda * grid)
  surv_trt <- (grid < eps) * exp(-lambda * grid) +
    (grid >= eps) * exp(-(lambda * eps + lambda.trt * (grid - eps)))
  surv_pool <- (1 - p) * surv_ctl + p * surv_trt
  D <- (h_ctl * atrisk_ctl + h_trt * atrisk_trt) / b  # expected event fraction per interval
  hr <- h_trt / h_ctl                                 # interval-wise hazard ratio
  phi <- atrisk_trt / atrisk_ctl                      # at-risk ratio (trt / control)
  w <- surv_pool^rho * (1 - surv_pool)^gamma
  num_vec <- D * w * (phi * hr / (1 + phi * hr) - phi / (1 + phi))
  den_vec <- D * w^2 * phi / (1 + phi)^2
  keep <- 1:n_sub
  E.star <- sum(num_vec[keep]) / sqrt(sum(den_vec[keep]))
  n <- (z_alpha + z_beta)^2 / E.star^2
  n_event <- sum(D) * n
  # round up so that n and n_event are attainable with allocation fraction p
  return(list(n = ceiling(ceiling(n * p) / p),
              n_event = ceiling(ceiling(n_event * p) / p),
              E.star = E.star,
              sum_D = sum(D[keep]),
              D = D[keep],
              den_vec = den_vec[keep],
              num_vec = num_vec[keep],
              time_vec = seq_len(n_sub) / b))
}
# Dead-code block: `if(F)` keeps this worked example from running when the
# file is sourced.  (Style note: F is an ordinary reassignable binding;
# FALSE would be safer, but the code is left untouched here.)
if(F){
# Example 1 in the paper Hasegawa(2014)
p<-2/3
tau<-66
omega<-18
eps<-6
#theta<-0.79 #percent of full trt effect
#lambda<-log(2)/6 # median survival time 10 months
#lambda.trt<-lambda*theta #full treatment effect
# m1/m2: median survival times for control and treatment arms.
m1=21.7
m2=25.8
lambda<-log(2)/m1#log(2)/21.7 # median survival time 10 months
lambda.trt<-log(2)*(m1-eps)/(m2-eps)/m1 #full treatment effect
(theta=lambda.trt/lambda)
alpha<-0.025
beta<-0.1
rho=0
gamma=1
b=30
res=sample.size_FH(eps,p,b,tau,omega,lambda,lambda.trt,rho, gamma,alpha,beta)
length(res$den_vec)
length(res$D)
}
#' The average hazard ratio calculation according to Hasegawa (2016),
#' Pharmaceutical Statistics.
#'
#' @param theta Post-delay hazard ratio (treatment / control).
#' @param eps Delay before the treatment effect starts.
#' @param lambda Control-arm hazard rate.
#' @param p Allocation proportion on the treatment arm (default 1/2).
#' @return The average hazard ratio (a scalar; equals theta when eps = 0
#'   and 1 when theta = 1).
avg.haz<-function(theta, eps,lambda,p=1/2){
  # Both terms share the pre-delay component and differ only in how the
  # post-delay component is weighted (theta in the numerator, 1 below).
  shared <- 1 / 2 / p * (1 - exp(-2 * p * lambda * eps))
  tail_wt <- 1 / p / (1 + theta) * exp(-lambda * eps * (1 + p - theta * (1 - p)))
  (shared + theta * tail_wt) / (shared + tail_wt)
}
# test
#lambda=log(2)/6
#theta=0.7
#eps=2
#avg.haz(theta,eps,lambda)
| /R/Hasegawa2014.R | no_license | biostata/IAfrac | R | false | false | 2,569 | r | ######## Fleming and Harrington #########
######## Ref. paper T. Hasegawa 2014 pharmaceutical statistics ##########
sample.size_FH<-function(eps,p,b,tau,omega,lambda,lambda.trt,rho, gamma,alpha,beta){
# every sequence starts from 0
# b<-30
z.alpha<-qnorm(p=alpha,lower.tail = FALSE)
z.beta<-qnorm(p=beta,lower.tail = FALSE)
n_sub<-floor(b*tau)
t<-c(0,seq(1,n_sub)/b)
h_1<-rep(lambda,(n_sub+1)) #control
h_2<-c(rep(lambda,round(eps*b)),rep(lambda.trt,n_sub-round(eps*b)+1)) #treatment
N_1<-rep((1-p),(n_sub+1))
N_2<-rep(p,(n_sub+1))
for(i in 1:(n_sub-1)){
N_1[i+1]<-N_1[i]*(1-h_1[i]/b-(t[i]>omega)/b/(tau-t[i]))
N_2[i+1]<-N_2[i]*(1-h_2[i]/b-(t[i]>omega)/b/(tau-t[i]))
}
N_1[n_sub+1]<-N_2[n_sub+1]<-0
f_S_1<-function(x) exp(-lambda*x)
f_S_2<-function(x) (x<eps)*exp(-lambda*x)+(x>=eps)*exp(-(lambda*eps+lambda.trt*(x-eps)))
#f_S_2_2<-function(x) (x<eps)*exp(-lambda*x)+(x>=eps)*exp(-eps*lambda.trt*(1/theta-1))*exp(-lambda.trt*x)
S_1<-f_S_1(t)
S_2<-f_S_2(t)
S<-(1-p)*S_1+p*S_2
D<-(h_1*N_1+h_2*N_2)/b
theta_seq<-h_2/h_1
phi<-N_2/N_1
r<-S^rho*(1-S)^gamma
num_vec<-D*r*(phi*theta_seq/(1+phi*theta_seq)-phi/(1+phi))
den_vec<-D*r^2*phi/(1+phi)^2
E.star_num<-sum(num_vec[1:n_sub])
E.star_den<-sqrt(sum(den_vec[1:n_sub]))
E.star<-E.star_num/E.star_den
n<-(z.alpha+z.beta)^2/E.star^2
n_event<-sum(D)*n
return(list(n=ceiling(ceiling(n*p)/p), n_event= ceiling(ceiling(n_event*p)/p),E.star=E.star,sum_D=sum(D[1:n_sub]),D=D[1:n_sub],den_vec=den_vec[1:n_sub],num_vec=num_vec[1:n_sub],time_vec=seq(1,n_sub)/b))
}
if(F){
# Example 1 in the paper Hasegawa(2014)
p<-2/3
tau<-66
omega<-18
eps<-6
#theta<-0.79 #percent of full trt effect
#lambda<-log(2)/6 # median survival time 10 months
#lambda.trt<-lambda*theta #full treatment effect
m1=21.7
m2=25.8
lambda<-log(2)/m1#log(2)/21.7 # median survival time 10 months
lambda.trt<-log(2)*(m1-eps)/(m2-eps)/m1 #full treatment effect
(theta=lambda.trt/lambda)
alpha<-0.025
beta<-0.1
rho=0
gamma=1
b=30
res=sample.size_FH(eps,p,b,tau,omega,lambda,lambda.trt,rho, gamma,alpha,beta)
length(res$den_vec)
length(res$D)
}
#' The average hazard ratio calculation according to Hasegawa(2016) Phamaceutical Statistics paper
avg.haz<-function(theta, eps,lambda,p=1/2){
term1<-1/2/p*(1-exp(-2*p*lambda*eps))+theta/p/(1+theta)*exp(-lambda*eps*(1+p-theta*(1-p)))
term2<-1/2/p*(1-exp(-2*p*lambda*eps))+1/p/(1+theta)*exp(-lambda*eps*(1+p-theta*(1-p)))
term1/term2
}
# test
#lambda=log(2)/6
#theta=0.7
#eps=2
#avg.haz(theta,eps,lambda)
|
## Save spp2exclude, spp2include and sitenames as internal data
exclosures:::save_internaldata()
library(exclosures)
#### READ AND PREPROCESS DATA #####
## Read site info
read_siteinfo("data-raw/sites_info_raw.csv")
## Read and prepare species info
read_sppinfo(sppdata = "data-raw/species_info_raw.csv")
## Read and process raw cover data
read_rawcover(rawcover = "data-raw/exclosures_cover_raw.csv",
tr.length = 25)
## Read and prepare damage data
read_damage("data-raw/exclosure_damage_raw.csv")
## Prepare dataset
make_dataset()
#### EXPLORATORY ANALYSIS ####
rmarkdown::render("analyses/EDA.Rmd")
#### MANUSCRIPT ####
rmarkdown::render("manuscript/cercados_Almoraima/cercados_Almoraima.Rmd")
| /makefile.R | permissive | Pakillo/exclosures-Almoraima | R | false | false | 731 | r |
## Save spp2exclude, spp2include and sitenames as internal data
exclosures:::save_internaldata()
library(exclosures)
#### READ AND PREPROCESS DATA #####
## Read site info
read_siteinfo("data-raw/sites_info_raw.csv")
## Read and prepare species info
read_sppinfo(sppdata = "data-raw/species_info_raw.csv")
## Read and process raw cover data
read_rawcover(rawcover = "data-raw/exclosures_cover_raw.csv",
tr.length = 25)
## Read and prepare damage data
read_damage("data-raw/exclosure_damage_raw.csv")
## Prepare dataset
make_dataset()
#### EXPLORATORY ANALYSIS ####
rmarkdown::render("analyses/EDA.Rmd")
#### MANUSCRIPT ####
rmarkdown::render("manuscript/cercados_Almoraima/cercados_Almoraima.Rmd")
|
# annotateMyIDs: convert a list of gene identifiers into other ID types using
# Bioconductor org.* annotation packages and write a tab-separated table.
#
# Galaxy-style error handling: print the message to stderr and exit non-zero
# instead of dying with a traceback.
options( show.error.messages=F, error = function () { cat( geterrmessage(), file=stderr() ); q( "no", 1, F ) } )
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
# NOTE(review): fgsea and ggplot2 are loaded but never used below; they are
# kept so the runtime environment of the wrapped tool is unchanged.
suppressPackageStartupMessages({
  library("fgsea")
  library("optparse")
  library("ggplot2")
})
# Command-line interface (long options mirror the Galaxy tool wrapper).
option_list <- list(
  make_option(c("-id_file", "--id_file"), type="character", help="Path to file with IDs to convert"),
  make_option(c("-out_tab","--out_tab"), type="character", help="Path to output file."),
  make_option(c("-id_type","--id_type"),type="character",help="Type of the incoming IDs"),
  make_option(c("-organism","--organism"), type="character",help="Which organism the IDs belong to"),
  make_option(c("-include_go","--include_go"),type="logical",default=TRUE,help="if TRUE, include GO IDs in the output"),
  make_option(c("-include_kegg","--include_kegg"),type="logical",default=TRUE,help="If TRUE, include KEGG pathways in the output"),
  make_option(c("-file_has_header","--file_has_header"),type="logical",default=TRUE,help="If this option is set to TRUE, the tool will assume that the ranked gene-list has a column heading and the gene names commence on the second line")
)
parser <- OptionParser(usage = "%prog [options] file", option_list=option_list)
args = parse_args(parser)
# Vars:
id_file <- args$id_file
out_tab <- args$out_tab
id_type <- args$id_type
organism <- args$organism
include_go <- args$include_go
include_kegg <- args$include_kegg
file_has_header = args$file_has_header
## If testing locally, change to TRUE and arguments will be set below
run_local <- FALSE
if (run_local) {
  id_file <- "genelist.txt"
  out_tab <- "anno.txt"
  id_type <- "SYMBOL"
  organism <- "hs"
  include_go <- FALSE
  include_kegg <- FALSE
  file_has_header <- FALSE
}
# BUG FIX: `header` used to be passed to as.character() -- where it was
# silently ignored -- instead of read.table(), so --file_has_header had no
# effect and a header line would be kept as an ID.
ids <- as.character(read.table(id_file, header = file_has_header)[, 1])
# Select the annotation database for the requested organism.
if (organism == "hs") {
  suppressPackageStartupMessages(library(org.Hs.eg.db))
  db <- org.Hs.eg.db
} else if (organism == "Mm") {
  suppressPackageStartupMessages(library(org.Mm.eg.db))
  db <- org.Mm.eg.db
} else {
  # Fail loudly: previously this branch only cat()'ed a note and the script
  # then crashed later with "object 'db' not found".
  stop(paste("Organism type not supported:", organism), call. = FALSE)
}
# Requested output columns; GO terms and KEGG pathway IDs are optional.
columns <- c("SYMBOL","ENSEMBL","ENTREZID")
if (include_go) columns <- c(columns, "GO")
if (include_kegg) columns <- c(columns, "PATH")
# select() here is AnnotationDbi::select() from the loaded org.* package.
result <- select(db, keys = ids, keytype = id_type, columns = columns)
write.table(result, file=out_tab, sep="\t", row.names=FALSE, quote=FALSE)
| /annotateMyIDs.r | permissive | galaxycammel/galaxy-annotateMyIDs | R | false | false | 2,424 | r | options( show.error.messages=F, error = function () { cat( geterrmessage(), file=stderr() ); q( "no", 1, F ) } )
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
suppressPackageStartupMessages({
library("fgsea")
library("optparse")
library("ggplot2")
})
option_list <- list(
make_option(c("-id_file", "--id_file"), type="character", help="Path to file with IDs to convert"),
make_option(c("-out_tab","--out_tab"), type="character", help="Path to output file."),
make_option(c("-id_type","--id_type"),type="character",help="Type of the incoming IDs"),
make_option(c("-organism","--organism"), type="character",help="Which organism the IDs belong to"),
make_option(c("-include_go","--include_go"),type="logical",default=TRUE,help="if TRUE, include GO IDs in the output"),
make_option(c("-include_kegg","--include_kegg"),type="logical",default=TRUE,help="If TRUE, include KEGG pathways in the output"),
make_option(c("-file_has_header","--file_has_header"),type="logical",default=TRUE,help="If this option is set to TRUE, the tool will assume that the ranked gene-list has a column heading and the gene names commence on the second line")
)
parser <- OptionParser(usage = "%prog [options] file", option_list=option_list)
args = parse_args(parser)
# Vars:
id_file <- args$id_file
out_tab <- args$out_tab
id_type <- args$id_type
organism <- args$organism
include_go <- args$include_go
include_kegg <- args$include_kegg
file_has_header = args$file_has_header
## If testing locally, change to TRUE and arguments will be set below
run_local <- FALSE
if (run_local) {
id_file <- "genelist.txt"
out_tab <- "anno.txt"
id_type <- "SYMBOL"
organism <- "hs"
include_go <- FALSE
include_kegg <- FALSE
file_has_header <- FALSE
}
ids <- as.character(read.table(id_file)[,1],header=file_has_header)
if(organism == "hs"){
suppressPackageStartupMessages(library(org.Hs.eg.db))
db <- org.Hs.eg.db
} else if (organism == "Mm"){
library(org.Mm.eg.db)
db <- org.Mm.eg.db
} else cat(paste("Organism type not supported", organism))
columns <- c("SYMBOL","ENSEMBL","ENTREZID")
if (include_go) columns <- c(columns, "GO")
if (include_kegg) columns <- c(columns, "PATH")
result <- select(db, keys = ids,keytype = id_type,columns = columns )
write.table(result, file=out_tab,sep="\t",row.names=FALSE,quote=FALSE)
|
#2017402063%%10 --> 3
#3 Consumer Price Index (2003=100)(TURKSTAT) -> "CLOTHING AND FOOTWEAR"
#Homework 2 ----
library(EVDS)
library(lubridate)
library(ggplot2 )
library(ggcorrplot)
library(skimr)
library(data.table)
library(corrplot)
library(GGally)
set_evds_key("O05EUMEwx4")
library(readxl)
library(forecast)
#Data Import and Manipulation --------------------------------------------
data <- get_series(c("TP.FG.J03", "TP.DK.USD.A.YTL", "TP.KTF17", "TP.TUFE1YI.T35", "TP.TUFE1YI.T40", "TP.TG2.Y01"),
start_date = "31-10-2008", end_date = "31-03-2021")
raw_data <- data.table(data$items)
setnames(raw_data, c("TP_FG_J03", "TP_DK_USD_A_YTL", "TP_KTF17", "TP_TUFE1YI_T35", "TP_TUFE1YI_T40", "TP_TG2_Y01"),
c("PriceIndex", "USDTRY", "Interest", "PPIApparel", "PPIShoes", "CCI"))
raw_data$Tarih <- as.Date(as.POSIXct(as.numeric((raw_data$UNIXTIME)), origin = "1970-01-02"))
raw_data$Tarih <- ymd(raw_data$Tarih)
raw_data[,"Year" := year(Tarih)]
raw_data[,"Month" := as.character(lubridate::month(Tarih, label = T))]
raw_data[,"PriceIndex" := as.numeric(PriceIndex)]
raw_data[,"USDTRY" := as.numeric(USDTRY)]
raw_data[,"Interest" := as.numeric(Interest)]
raw_data[,"PPIApparel" := as.numeric(PPIApparel)]
raw_data[,"PPIShoes" := as.numeric(PPIShoes)]
raw_data[,"CCI" := as.numeric(CCI)]
raw_data[,"Month" := as.factor(Month)]
cloth_cpi <- raw_data[1:150,c("Tarih", "Year", "Month", "PriceIndex", "Interest",
"PPIApparel", "USDTRY", "PPIShoes", "CCI")]
str(cloth_cpi)
#Descriptive Analysis ----
# Level plot of the price index over time.
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))
# Mean index per calendar month (seasonality check) and sd per year
# (dispersion over time).
cloth_cpi[, mean(PriceIndex), by=Month]
cloth_cpi[, sd(PriceIndex), by=Year]
#Checking Autocorrelation & Cross-Correlations ----
pacf(cloth_cpi$PriceIndex)
# Cross-correlations of the index with each candidate regressor.
ccf(cloth_cpi$PriceIndex, cloth_cpi$Interest)
ccf(cloth_cpi$PriceIndex, cloth_cpi$USDTRY)
ccf(cloth_cpi$PriceIndex, cloth_cpi$PPIApparel)
ccf(cloth_cpi$PriceIndex, cloth_cpi$PPIShoes)
ccf(cloth_cpi$PriceIndex, cloth_cpi$CCI)
# One-month lags of the regressors (data.table::shift defaults to lag 1).
cloth_cpi[, PPIApparelLag1 := shift(PPIApparel)]
cloth_cpi[, USDTRYLag1 := shift(USDTRY)]
cloth_cpi[, PriceIndexLag1 := shift(PriceIndex)]
#cloth_cpi[, PriceIndexLag2 := shift(PriceIndex,2)]
cloth_cpi[, InterestLag1 := shift(Interest)]
cloth_cpi[, PPIShoesLag1 := shift(PPIShoes)]
cloth_cpi[, CCILag1 := shift(CCI)]
# Drop the first row (its lag values are NA) and add a linear trend index.
cloth_cpi <- cloth_cpi[-1]
cloth_cpi[, "Trend" := 1:.N]
ggcorrplot(cor(cloth_cpi[, c("PriceIndex", "Interest", "PPIApparel", "USDTRY", "PPIShoes")]), hc.order = TRUE,
           type = "lower", lab = TRUE)
#ggpairs(cloth_cpi[, c("PriceIndex", "InterestLag1", "PPIApparelLag1", "USDTRYLag1", "PPIShoesLag1", "CCILag1")])
ggpairs(cloth_cpi[, c("PriceIndex", "Interest", "PPIApparel", "USDTRY", "PPIShoes", "CCI")])
#Box.test(cloth_cpi$PriceIndex, lag=12, type="Ljung-Box")
#Linear Regression Version 1 ----
# Baseline: price index explained by the linear trend only.
ts_reg_trendonly = lm(PriceIndex~Trend,cloth_cpi)
summary(ts_reg_trendonly)
cloth_cpi[, "TrendOnly" := predict(ts_reg_trendonly, cloth_cpi)]
# Fitted vs actual.
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = TrendOnly, color = 'Trend'))
# Residuals over time.
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - TrendOnly)) +
  labs(y = "Residuals", x = "Date")
#autocorrelation in the residuals#
#Linear Regression Version 2 ----
# Add monthly seasonality through the Month factor.
ts_reg_trendmonth = lm(PriceIndex~Trend + Month,cloth_cpi)
summary(ts_reg_trendmonth)
cloth_cpi[, "TrendMonth" := predict(ts_reg_trendmonth , cloth_cpi)]
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = TrendMonth, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - TrendMonth))
#Linear Regression Version 3 (not included in the report)----
# NOTE(review): these lag columns were already created above; recomputing
# them and dropping another leading row removes one extra observation and
# resets Trend -- confirm this repetition is intentional.
cloth_cpi[, PPIApparelLag1 := shift(PPIApparel)]
cloth_cpi[, USDTRYLag1 := shift(USDTRY)]
cloth_cpi[, PriceIndexLag1 := shift(PriceIndex)]
cloth_cpi[, PPIShoesLag1 := shift(PPIShoes)]
cloth_cpi <- cloth_cpi[-1]
cloth_cpi[, "Trend" := 1:.N]
# Trend + seasonality + one-month-lagged apparel PPI.
ts_reg_ppi_app = lm(PriceIndex~Trend+Month+PPIApparelLag1, cloth_cpi)
summary(ts_reg_ppi_app)
cloth_cpi[, "V3Predict" := predict(ts_reg_ppi_app, cloth_cpi)]
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V3Predict, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - V3Predict))
#deneme = lm(PriceIndex~PPIApparelLag1, cloth_cpi)
#summary(deneme)
#Linear Regression Version 4 (with USDTRY & PPI Apparel)----
ts_reg_usdppi = lm(PriceIndex~Trend+Month+PPIApparelLag1+USDTRYLag1, cloth_cpi)
# Fix: summarise the model fitted above. The original called
# summary(ts_reg_usd), which is never defined anywhere and errors.
summary(ts_reg_usdppi)
checkresiduals(ts_reg_usdppi)
cloth_cpi[, "V4Predict" := predict(ts_reg_usdppi, cloth_cpi)]
pacf(cloth_cpi$PriceIndex-cloth_cpi$V4Predict)
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V4Predict, color = 'Trend'))
# NOTE(review): residuals are plotted against USDTRY here (not time),
# presumably to look for remaining structure vs the FX rate -- confirm.
ggplot(cloth_cpi, aes(x = USDTRY)) +
  geom_line(aes(y = PriceIndex - V4Predict))
#Linear Regression Version 5 (with PPI Apparel & lagged CCI, not included in the report) ----
# (The original header said "USDTRY and both PPIs", but the formula actually
# uses PPIApparelLag1 + CCILag1.)
ts_reg_V5 = lm(PriceIndex~Trend+Month+PPIApparelLag1+CCILag1, cloth_cpi)
summary(ts_reg_V5)
plot(ts_reg_V5)
cloth_cpi[, "V5Predict" := predict(ts_reg_V5, cloth_cpi)]
acf(cloth_cpi$PriceIndex-cloth_cpi$V5Predict)
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V5Predict, color = 'Trend'))
# Fix: plot the residuals of model V5. The original subtracted V4Predict
# here, so the residual plot showed the previous model, not this one.
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - V5Predict))
#AutoRegressive Model Version 6 ----
# Adds the lagged dependent variable (PriceIndexLag1) to model 4.
ts_reg_auto = lm(PriceIndex~Trend+Month+PPIApparelLag1+USDTRYLag1+PriceIndexLag1, cloth_cpi)
summary(ts_reg_auto)
checkresiduals(ts_reg_auto, lag = 12)
# Fix: store the version-6 fit under its own column name. The original wrote
# it into "V5Predict" (clobbering model 5's fit) and then referenced an
# undefined "V6Predict" in the final plot, which raised an error.
cloth_cpi[, "V6Predict" := predict(ts_reg_auto, cloth_cpi)]
acf(cloth_cpi$PriceIndex-cloth_cpi$V6Predict)
Box.test(cloth_cpi$PriceIndex-cloth_cpi$V6Predict, lag=12, type="Ljung-Box")
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V6Predict, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - V6Predict))
| /files/Homework 2/Homework 2.R | no_license | BU-IE-360/spring21-alpserdaroglu | R | false | false | 6,146 | r | #2017402063%%10 --> 3
#3 Consumer Price Index (2003=100)(TURKSTAT) -> "CLOTHING AND FOOTWEAR"
#Homework 2 ----
library(EVDS)
library(lubridate)
library(ggplot2 )
library(ggcorrplot)
library(skimr)
library(data.table)
library(corrplot)
library(GGally)
set_evds_key("O05EUMEwx4")
library(readxl)
library(forecast)
#Data Import and Manipulation --------------------------------------------
data <- get_series(c("TP.FG.J03", "TP.DK.USD.A.YTL", "TP.KTF17", "TP.TUFE1YI.T35", "TP.TUFE1YI.T40", "TP.TG2.Y01"),
start_date = "31-10-2008", end_date = "31-03-2021")
raw_data <- data.table(data$items)
setnames(raw_data, c("TP_FG_J03", "TP_DK_USD_A_YTL", "TP_KTF17", "TP_TUFE1YI_T35", "TP_TUFE1YI_T40", "TP_TG2_Y01"),
c("PriceIndex", "USDTRY", "Interest", "PPIApparel", "PPIShoes", "CCI"))
raw_data$Tarih <- as.Date(as.POSIXct(as.numeric((raw_data$UNIXTIME)), origin = "1970-01-02"))
raw_data$Tarih <- ymd(raw_data$Tarih)
raw_data[,"Year" := year(Tarih)]
raw_data[,"Month" := as.character(lubridate::month(Tarih, label = T))]
raw_data[,"PriceIndex" := as.numeric(PriceIndex)]
raw_data[,"USDTRY" := as.numeric(USDTRY)]
raw_data[,"Interest" := as.numeric(Interest)]
raw_data[,"PPIApparel" := as.numeric(PPIApparel)]
raw_data[,"PPIShoes" := as.numeric(PPIShoes)]
raw_data[,"CCI" := as.numeric(CCI)]
raw_data[,"Month" := as.factor(Month)]
cloth_cpi <- raw_data[1:150,c("Tarih", "Year", "Month", "PriceIndex", "Interest",
"PPIApparel", "USDTRY", "PPIShoes", "CCI")]
str(cloth_cpi)
#Descriptive Analysis ----
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex, color = 'Price Index'))
cloth_cpi[, mean(PriceIndex), by=Month]
cloth_cpi[, sd(PriceIndex), by=Year]
#Checking Autocorrelation & Cross-Correlations ----
pacf(cloth_cpi$PriceIndex)
ccf(cloth_cpi$PriceIndex, cloth_cpi$Interest)
ccf(cloth_cpi$PriceIndex, cloth_cpi$USDTRY)
ccf(cloth_cpi$PriceIndex, cloth_cpi$PPIApparel)
ccf(cloth_cpi$PriceIndex, cloth_cpi$PPIShoes)
ccf(cloth_cpi$PriceIndex, cloth_cpi$CCI)
cloth_cpi[, PPIApparelLag1 := shift(PPIApparel)]
cloth_cpi[, USDTRYLag1 := shift(USDTRY)]
cloth_cpi[, PriceIndexLag1 := shift(PriceIndex)]
#cloth_cpi[, PriceIndexLag2 := shift(PriceIndex,2)]
cloth_cpi[, InterestLag1 := shift(Interest)]
cloth_cpi[, PPIShoesLag1 := shift(PPIShoes)]
cloth_cpi[, CCILag1 := shift(CCI)]
cloth_cpi <- cloth_cpi[-1]
cloth_cpi[, "Trend" := 1:.N]
ggcorrplot(cor(cloth_cpi[, c("PriceIndex", "Interest", "PPIApparel", "USDTRY", "PPIShoes")]), hc.order = TRUE,
type = "lower", lab = TRUE)
#ggpairs(cloth_cpi[, c("PriceIndex", "InterestLag1", "PPIApparelLag1", "USDTRYLag1", "PPIShoesLag1", "CCILag1")])
ggpairs(cloth_cpi[, c("PriceIndex", "Interest", "PPIApparel", "USDTRY", "PPIShoes", "CCI")])
#Box.test(cloth_cpi$PriceIndex, lag=12, type="Ljung-Box")
#Linear Regression Version 1 ----
ts_reg_trendonly = lm(PriceIndex~Trend,cloth_cpi)
summary(ts_reg_trendonly)
cloth_cpi[, "TrendOnly" := predict(ts_reg_trendonly, cloth_cpi)]
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex, color = 'Price Index'))+
geom_line(aes(y = TrendOnly, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex - TrendOnly)) +
labs(y = "Residuals", x = "Date")
#autocorrelation in the residuals#
#Linear Regression Version 2 ----
ts_reg_trendmonth = lm(PriceIndex~Trend + Month,cloth_cpi)
summary(ts_reg_trendmonth)
cloth_cpi[, "TrendMonth" := predict(ts_reg_trendmonth , cloth_cpi)]
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex, color = 'Price Index'))+
geom_line(aes(y = TrendMonth, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex - TrendMonth))
#Linear Regression Version 3 (not included in the report)----
cloth_cpi[, PPIApparelLag1 := shift(PPIApparel)]
cloth_cpi[, USDTRYLag1 := shift(USDTRY)]
cloth_cpi[, PriceIndexLag1 := shift(PriceIndex)]
cloth_cpi[, PPIShoesLag1 := shift(PPIShoes)]
cloth_cpi <- cloth_cpi[-1]
cloth_cpi[, "Trend" := 1:.N]
ts_reg_ppi_app = lm(PriceIndex~Trend+Month+PPIApparelLag1, cloth_cpi)
summary(ts_reg_ppi_app)
cloth_cpi[, "V3Predict" := predict(ts_reg_ppi_app, cloth_cpi)]
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex, color = 'Price Index'))+
geom_line(aes(y = V3Predict, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
geom_line(aes(y = PriceIndex - V3Predict))
#deneme = lm(PriceIndex~PPIApparelLag1, cloth_cpi)
#summary(deneme)
#Linear Regression Version 4 (with USDTRY & PPI Apparel)----
ts_reg_usdppi = lm(PriceIndex~Trend+Month+PPIApparelLag1+USDTRYLag1, cloth_cpi)
# Fix: summarise the model fitted above. The original called
# summary(ts_reg_usd), which is never defined anywhere and errors.
summary(ts_reg_usdppi)
checkresiduals(ts_reg_usdppi)
cloth_cpi[, "V4Predict" := predict(ts_reg_usdppi, cloth_cpi)]
pacf(cloth_cpi$PriceIndex-cloth_cpi$V4Predict)
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V4Predict, color = 'Trend'))
# NOTE(review): residuals are plotted against USDTRY here (not time),
# presumably to look for remaining structure vs the FX rate -- confirm.
ggplot(cloth_cpi, aes(x = USDTRY)) +
  geom_line(aes(y = PriceIndex - V4Predict))
#Linear Regression Version 5 (with PPI Apparel & lagged CCI, not included in the report) ----
# (The original header said "USDTRY and both PPIs", but the formula actually
# uses PPIApparelLag1 + CCILag1.)
ts_reg_V5 = lm(PriceIndex~Trend+Month+PPIApparelLag1+CCILag1, cloth_cpi)
summary(ts_reg_V5)
plot(ts_reg_V5)
cloth_cpi[, "V5Predict" := predict(ts_reg_V5, cloth_cpi)]
acf(cloth_cpi$PriceIndex-cloth_cpi$V5Predict)
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V5Predict, color = 'Trend'))
# Fix: plot the residuals of model V5. The original subtracted V4Predict
# here, so the residual plot showed the previous model, not this one.
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - V5Predict))
#AutoRegressive Model Version 6 ----
# Adds the lagged dependent variable (PriceIndexLag1) to model 4.
ts_reg_auto = lm(PriceIndex~Trend+Month+PPIApparelLag1+USDTRYLag1+PriceIndexLag1, cloth_cpi)
summary(ts_reg_auto)
checkresiduals(ts_reg_auto, lag = 12)
# Fix: store the version-6 fit under its own column name. The original wrote
# it into "V5Predict" (clobbering model 5's fit) and then referenced an
# undefined "V6Predict" in the final plot, which raised an error.
cloth_cpi[, "V6Predict" := predict(ts_reg_auto, cloth_cpi)]
acf(cloth_cpi$PriceIndex-cloth_cpi$V6Predict)
Box.test(cloth_cpi$PriceIndex-cloth_cpi$V6Predict, lag=12, type="Ljung-Box")
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex, color = 'Price Index'))+
  geom_line(aes(y = V6Predict, color = 'Trend'))
ggplot(cloth_cpi, aes(x = Tarih)) +
  geom_line(aes(y = PriceIndex - V6Predict))
|
## Stagerunners are [tree structures](https://en.wikipedia.org/wiki/Tree_(data_structure))
## and come with a natural set of operations, like taking the predecessor,
## successor, and root of a node. However, these are not entirely simple to
## implement in a manner that is implementation-independent.
##
## Specifically, we recognize that the notion of a node successor and
## predecessor is implementation agnostic as long as we have access
## to class methods that provide access to a node's parent and children.
## In this case, we can write an implementation-agnostic version that
## works regardless of whether the object is an S3, S4, or R6
## object.
#' @include treeSkeleton-initialize.R treeSkeleton-predecessor.R
#' treeSkeleton-successor.R treeSkeleton-parent_index.R
NULL
#' Find the root node of the tree (the only one with no parent).
#'
#' @name treeSkeleton__root
#' @return The root node of the tree or NULL if empty tree.
treeSkeleton__root <- function() {
  # Fix: recurse all the way up. The original returned self$parent() after a
  # single step, so for any node at depth >= 2 it yielded an intermediate
  # ancestor rather than the root.
  if (is.null(self$parent())) self
  else self$parent()$root()
}
#' Find the first leaf in a tree.
#'
#' @name treeSkeleton__first_leaf
#' @return The first leaf, that is, the first terminal child node.
treeSkeleton__first_leaf <- function() {
  kids <- self$children()
  # A childless node is itself a leaf; otherwise descend along the leftmost
  # child until one is reached.
  if (length(kids) > 0) kids[[1]]$first_leaf() else self
}
#' Find the last leaf in a tree.
#'
#' @name treeSkeleton__last_leaf
#' @return The last leaf, that is, the last terminal child node.
treeSkeleton__last_leaf <- function() {
  kids <- self$children()
  # Descend along the rightmost child until a childless node is reached.
  if (length(kids) == 0) self else kids[[length(kids)]]$last_leaf()
}
#' Find the parent of the current object wrapped in a treeSkeleton.
#'
#' The result is computed once and memoised in \code{self$.parent}; later
#' calls return the cached wrapper.
#' @name treeSkeleton__parent
treeSkeleton__parent <- function() {
  if (!is.unitialized_field(self$.parent)) return(self$.parent)
  # The parent accessor on the wrapped object is resolved by name through
  # OOP_type_independent_method(), whatever OOP system the object uses.
  # A NULL parent marks the root and is cached as-is. The assignment is the
  # last expression, so its value doubles as the return value.
  self$.parent <-
    if (is.null(obj <- OOP_type_independent_method(self$object, self$parent_caller))) NULL
    else treeSkeleton$new(obj, parent_caller = self$parent_caller,
                          children_caller = self$children_caller)
}
#' Find the children of the current object wrapped in treeSkeletons.
#'
#' The result is computed once and memoised in \code{self$.children}.
#' @name treeSkeleton__children
treeSkeleton__children <- function() {
  if (!is.unitialized_field(self$.children)) return(self$.children)
  # Fetch the raw children from the wrapped object, then wrap each in its
  # own treeSkeleton. The assignment's value doubles as the return value.
  # NOTE(review): unlike treeSkeleton__parent, children_caller is not
  # forwarded here, so child wrappers use the default -- confirm intended.
  prechildren <- OOP_type_independent_method(self$object, self$children_caller)
  self$.children <- lapply(prechildren, treeSkeleton$new,
                           parent_caller = self$parent_caller)
}
#' Find the key with the given index using the names of the lists
#' that parametrize each node's children.
#'
#' For example, if our tree structure is given by
#' \code{list(a = list(b = 1, c = 2))}
#' then calling \code{find('a/b')} on the root node will return \code{1}.
#'
#' @name treeSkeleton__find
#' @param key character. The key to find in the given tree structure,
#'    whether nodes are named by their name in the \code{children()}
#'    list. Numeric indices can be used to refer to unnamed nodes.
#'    For example, if key is \code{a/2/b}, this method would try to find
#'    the current node's child \code{a}'s second child's \code{b} child.
#'    (Just look at the examples).
#' @return the subtree or terminal node with the given key.
#' @examples
#' \dontrun{
#' sr <- stageRunner$new(new.env(), list(a = list(force, list(b = function(x) x + 1))))
#' stagerunner:::treeSkeleton$new(sr)$find('a/2/b') # function(x) x + 1
#' }
treeSkeleton__find <- function(key) {
  ## Currently out of service! Will be back shortly.
  # The entire implementation below is commented out, so as written the
  # function body is empty and every call returns NULL.
  # stopifnot(is.character(key))
  # if (length(key) == 0 || identical(key, '')) return(self$object)
  # # Extract "foo" from "foo/bar/baz"
  # subkey <- regmatches(key, regexec('^[^/]+', key))[[1]]
  # key_remainder <- substr(key, nchar(subkey) + 2, nchar(key))
  # if (grepl('^[0-9]+', subkey)) {
  #   subkey <- as.integer(subkey)
  #   key_falls_within_children <- length(self$children()) >= subkey
  #   stopifnot(key_falls_within_children)
  # } else {
  #   matches <- grepl(subkey, names(self$children()))
  #   stopifnot(length(matches) == 1)
  #   key <- which(matches)
  # }
  # self$children()[[key]]$find(key_remainder)
}
#' This class implements iterators for a tree-based structure
#' without an actual underlying tree.
#'
#' In other dynamic languages, this kind of behavior would be called
#' duck typing. Imagine we have an object \code{x} that is of some
#' reference class. This object has a tree structure, and each node
#' in the tree has a parent and children. However, the methods to
#' fetch a node's parent or its children may have arbitrary names.
#' These names are stored in \code{treeSkeleton}'s \code{parent_caller}
#' and \code{children_caller} fields. Thus, if \code{x$methods()}
#' refers to \code{x}'s children and \code{x$parent_method()} refers
#' to \code{x}'s parent, we could define a \code{treeSkeleton} for
#' \code{x} by writing \code{treeSkeleton$new(x, 'parent_method', 'methods')}.
#'
#' The iterators on a \code{treeSkeleton} use the standard definition of
#' successor, predecessor, ancestor, etc.
#'
#' @name treeSkeleton
#' @docType class
#' @format NULL
treeSkeleton_ <- R6::R6Class('treeSkeleton',
  public = list(
    # NOTE(review): the string values below ('ANY', 'character') imitate
    # reference-class field declarations, but in R6 they are the fields'
    # literal initial values; they are presumably replaced by initialize()
    # (defined in treeSkeleton-initialize.R) -- confirm.
    object = 'ANY',
    ## As long as we know how to get an objects parent and children,
    ## we will be able to determine all the nice derived methods below.
    parent_caller = 'character',
    children_caller = 'character',
    # Memoisation slots, filled lazily by children() and parent().
    .children = 'ANY',
    .parent = 'ANY',
    initialize = treeSkeleton__initialize,
    successor = treeSkeleton__successor,
    predecessor = treeSkeleton__predecessor,
    parent = treeSkeleton__parent,
    children = treeSkeleton__children,
    root = treeSkeleton__root,
    first_leaf = treeSkeleton__first_leaf,
    last_leaf = treeSkeleton__last_leaf,
    find = treeSkeleton__find,
    .parent_index = treeSkeleton__.parent_index,
    show = function() { cat("treeSkeleton wrapping:\n"); print(self$object) }
  )
)
## Some fancy tricks to make `treeSkeleton$new(...)` and `treeSkeleton(...)`
## have the same effect, just like in traditional reference classes.
#' @export
treeSkeleton <- structure(
  function(...) { treeSkeleton_$new(...) },
  class = "treeSkeleton_"
)
# `$` on the wrapper admits only the literal name "new" and then returns the
# wrapper function itself (..1), so `treeSkeleton$new(...)` ends up invoking
# `treeSkeleton(...)`, which constructs a treeSkeleton_ instance.
#' @export
`$.treeSkeleton_` <- function(...) {
  stopifnot(identical(..2, "new"))
  ..1
}
# Sentinel marking a memoised treeSkeleton field that has not been computed.
uninitialized_field <- function() {
  # Fix: structure(NULL, ...) is deprecated because NULL cannot carry
  # attributes, so the sentinel silently lost its class and
  # is.unitialized_field() could never recognise it. An empty list carries
  # the class attribute reliably; callers only ever test the class.
  structure(list(), class = "uninitialized_field")
}
# Test whether `x` is the uninitialized-field sentinel.
# (The name keeps the original spelling, which callers in this file use.)
is.unitialized_field <- function(x) {
  inherits(x, "uninitialized_field")
}
| /R/treeSkeleton.R | permissive | mindis/stagerunner | R | false | false | 6,433 | r | ## Stagerunners are [tree structures](https://en.wikipedia.org/wiki/Tree_(data_structure))
## and come with a natural set of operations, like taking the predecessor,
## successor, and root of a node. However, these are not entirely simple to
## implement in a manner that is implementation-independent.
##
## Specifically, we recognize that the notion of a node successor and
## predecessor is implementation agnostic as long as we have access
## to class methods that provide access to a node's parent and children.
## In this case, we can write an implementation-agnostic version that
## works regardless of whether the object is an S3, S4, or R6
## object.
#' @include treeSkeleton-initialize.R treeSkeleton-predecessor.R
#' treeSkeleton-successor.R treeSkeleton-parent_index.R
NULL
#' Find the root node of the tree (the only one with no parent).
#'
#' @name treeSkeleton__root
#' @return The root node of the tree or NULL if empty tree.
treeSkeleton__root <- function() {
  # Fix: recurse all the way up. The original returned self$parent() after a
  # single step, so for any node at depth >= 2 it yielded an intermediate
  # ancestor rather than the root.
  if (is.null(self$parent())) self
  else self$parent()$root()
}
#' Find the first leaf in a tree.
#'
#' @name treeSkeleton__first_leaf
#' @return The first leaf, that is, the first terminal child node.
treeSkeleton__first_leaf <- function() {
if (length(self$children()) == 0) self
else self$children()[[1]]$first_leaf()
}
#' Find the last leaf in a tree.
#'
#' @name treeSkeleton__last_leaf
#' @return The last leaf, that is, the last terminal child node.
treeSkeleton__last_leaf <- function() {
if (length(childs <- self$children()) == 0) self
else childs[[length(childs)]]$last_leaf()
}
#' Find the parent of the current object wrapped in a treeSkeleton.
#' @name treeSkeleton__parent
treeSkeleton__parent <- function() {
if (!is.unitialized_field(self$.parent)) return(self$.parent)
self$.parent <-
if (is.null(obj <- OOP_type_independent_method(self$object, self$parent_caller))) NULL
else treeSkeleton$new(obj, parent_caller = self$parent_caller,
children_caller = self$children_caller)
}
#' Find the children of the current object wrapped in treeSkeletons.
#' @name treeSkeleton__children
treeSkeleton__children <- function() {
if (!is.unitialized_field(self$.children)) return(self$.children)
prechildren <- OOP_type_independent_method(self$object, self$children_caller)
self$.children <- lapply(prechildren, treeSkeleton$new,
parent_caller = self$parent_caller)
}
#' Find the key with the given index using the names of the lists
#' that parametrize each node's children.
#'
#' For example, if our tree structure is given by
#' \code{list(a = list(b = 1, c = 2))}
#' then calling \code{find('a/b')} on the root node will return \code{1}.
#'
#' @name treeSkeleton__find
#' @param key character. The key to find in the given tree structure,
#' whether nodes are named by their name in the \code{children()}
#' list. Numeric indices can be used to refer to unnamed nodes.
#' For example, if key is \code{a/2/b}, this method would try to find
#' the current node's child \code{a}'s second child's \code{b} child.
#' (Just look at the examples).
#' @return the subtree or terminal node with the given key.
#' @examples
#' \dontrun{
#' sr <- stageRunner$new(new.env(), list(a = list(force, list(b = function(x) x + 1))))
#' stagerunner:::treeSkeleton$new(sr)$find('a/2/b') # function(x) x + 1
#' }
treeSkeleton__find <- function(key) {
## Currently out of service! Will be back shortly.
# stopifnot(is.character(key))
# if (length(key) == 0 || identical(key, '')) return(self$object)
# # Extract "foo" from "foo/bar/baz"
# subkey <- regmatches(key, regexec('^[^/]+', key))[[1]]
# key_remainder <- substr(key, nchar(subkey) + 2, nchar(key))
# if (grepl('^[0-9]+', subkey)) {
# subkey <- as.integer(subkey)
# key_falls_within_children <- length(self$children()) >= subkey
# stopifnot(key_falls_within_children)
# } else {
# matches <- grepl(subkey, names(self$children()))
# stopifnot(length(matches) == 1)
# key <- which(matches)
# }
# self$children()[[key]]$find(key_remainder)
}
#' This class implements iterators for a tree-based structure
#' without an actual underlying tree.
#'
#' In other dynamic languages, this kind of behavior would be called
#' duck typing. Imagine we have an object \code{x} that is of some
#' reference class. This object has a tree structure, and each node
#' in the tree has a parent and children. However, the methods to
#' fetch a node's parent or its children may have arbitrary names.
#' These names are stored in \code{treeSkeleton}'s \code{parent_caller}
#' and \code{children_caller} fields. Thus, if \code{x$methods()}
#' refers to \code{x}'s children and \code{x$parent_method()} refers
#' to \code{x}'s parent, we could define a \code{treeSkeleton} for
#' \code{x} by writing \code{treeSkeleton$new(x, 'parent_method', 'methods')}.
#'
#' The iterators on a \code{treeSkeleton} use the standard definition of
#' successor, predecessor, ancestor, etc.
#'
#' @name treeSkeleton
#' @docType class
#' @format NULL
treeSkeleton_ <- R6::R6Class('treeSkeleton',
public = list(
object = 'ANY',
## As long as we know how to get an objects parent and children,
## we will be able to determine all the nice derived methods below.
parent_caller = 'character',
children_caller = 'character',
.children = 'ANY',
.parent = 'ANY',
initialize = treeSkeleton__initialize,
successor = treeSkeleton__successor,
predecessor = treeSkeleton__predecessor,
parent = treeSkeleton__parent,
children = treeSkeleton__children,
root = treeSkeleton__root,
first_leaf = treeSkeleton__first_leaf,
last_leaf = treeSkeleton__last_leaf,
find = treeSkeleton__find,
.parent_index = treeSkeleton__.parent_index,
show = function() { cat("treeSkeleton wrapping:\n"); print(self$object) }
)
)
## Some fancy tricks to make `treeSkeleton$new(...)` and `treeSkeleton(...)`
## have the same effect, just like in traditional reference classes.
#' @export
treeSkeleton <- structure(
function(...) { treeSkeleton_$new(...) },
class = "treeSkeleton_"
)
#' @export
`$.treeSkeleton_` <- function(...) {
stopifnot(identical(..2, "new"))
..1
}
# Sentinel marking a memoised treeSkeleton field that has not been computed.
uninitialized_field <- function() {
  # Fix: structure(NULL, ...) is deprecated because NULL cannot carry
  # attributes, so the sentinel silently lost its class and
  # is.unitialized_field() could never recognise it. An empty list carries
  # the class attribute reliably; callers only ever test the class.
  structure(list(), class = "uninitialized_field")
}
# Test whether `x` is the uninitialized-field sentinel.
# (The name keeps the original spelling, which callers in this file use.)
is.unitialized_field <- function(x) {
  inherits(x, "uninitialized_field")
}
|
#Produced by the EasyCharts team; unauthorized commercial use will be pursued.
#For usage questions and further learning, contact WeChat: EasyCharts
library(ggplot2)
library(data.table) # provides data.table()
library(ggTimeSeries)
library(RColorBrewer)
set.seed(1234)
# Synthetic daily series covering 2014-2017 (1461 days).
dat <- data.table(
  date = seq(as.Date("1/01/2014", "%d/%m/%Y"),as.Date("31/12/2017", "%d/%m/%Y"),"days"),
  ValueCol = runif(1461)
)
# Boost weekend values, then add a seasonal component peaking at year ends.
# NOTE(review): grouping by .I makes runif(1) draw once per row -- confirm
# that is the intent rather than a single shared draw for all rows.
dat[, ValueCol := ValueCol + (strftime(date,"%u") %in% c(6,7) * runif(1) * 0.75), .I]
dat[, ValueCol := ValueCol + (abs(as.numeric(strftime(date,"%m")) - 6.5)) * runif(1) * 0.75, .I]
dat$Year<- as.integer(strftime(dat$date, '%Y')) # year
dat$month <- as.integer(strftime(dat$date, '%m')) # month
dat$week<- as.integer(strftime(dat$date, '%W')) # week of year
# Mean week number per month -> positions for the month labels on the x axis.
MonthLabels <- dat[,list(meanWkofYr = mean(week)), by = c('month') ]
MonthLabels$month <-month.abb[MonthLabels$month]
# Calendar heatmap: one facet per year, weekdays on y, weeks of year on x.
ggplot(data=dat,aes(date=date,fill=ValueCol))+
  stat_calendar_heatmap()+
  scale_fill_gradientn(colours= rev(brewer.pal(11,'Spectral')))+
  facet_wrap(~Year, ncol = 1,strip.position = "right")+
  scale_y_continuous(breaks=seq(7, 1, -1),labels=c("Mon","Tue","Wed","Thu","Fri","Sat","Sun"))+
  scale_x_continuous(breaks = MonthLabels[,meanWkofYr], labels = MonthLabels[, month],expand = c(0, 0)) +
  xlab(NULL)+
  ylab(NULL)+
  theme( panel.background = element_blank(),
         panel.border = element_rect(colour="grey60",fill=NA),
         strip.background = element_blank(),
         strip.text = element_text(size=13,face="plain",color="black"),
         axis.line=element_line(colour="black",size=0.25),
         axis.title=element_text(size=10,face="plain",color="black"),
         axis.text = element_text(size=10,face="plain",color="black"))
#---------------------------------------------------------
# Second variant: month-faceted calendar for 2017 only, built manually with
# geom_tile and day-of-month labels.
library(dplyr)
dat17 <- filter(dat,Year==2017)[,c(1,2)]
dat17$month <- as.integer(strftime(dat17$date, '%m')) # month
dat17$monthf<-factor(dat17$month,levels=as.character(1:12),
                     labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
dat17$weekday<-as.integer(strftime(dat17$date, '%u')) # day of week (1 = Mon)
dat17$weekdayf<-factor(dat17$weekday,levels=(1:7),
                       labels=(c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")),ordered=TRUE)
dat17$yearmonth<- strftime(dat17$date, '%m%Y') # month-year key
dat17$yearmonthf<-factor(dat17$yearmonth)
dat17$week<- as.integer(strftime(dat17$date, '%W')) # week of year
# Week-of-month index: week number relative to each month's first week.
dat17<-dat17 %>% group_by(monthf)%>%mutate(monthweek=1+week-min(week))
dat17$day<-strftime(dat17$date, "%d")
ggplot(dat17, aes(weekdayf, monthweek, fill=ValueCol)) +
  geom_tile(colour = "white") +
  scale_fill_gradientn(colours=rev(brewer.pal(11,'Spectral')))+
  geom_text(aes(label=day),size=3)+
  facet_wrap(~monthf ,nrow=3) +
  scale_y_reverse()+
  xlab("Day") + ylab("Week of the month") +
  theme(strip.text = element_text(size=11,face="plain",color="black"))
| /第6章 时间序列型图表/图6-2-2 日历图.R | no_license | Easy-Shu/Beautiful-Visualization-with-R | R | false | false | 2,886 | r |
#EasyCharts团队出品,如有商用必究,
#如需使用与深入学习,请联系微信:EasyCharts
library(ggplot2)
library(data.table) #提供data.table()函数
library(ggTimeSeries)
library(RColorBrewer)
set.seed(1234)
dat <- data.table(
date = seq(as.Date("1/01/2014", "%d/%m/%Y"),as.Date("31/12/2017", "%d/%m/%Y"),"days"),
ValueCol = runif(1461)
)
dat[, ValueCol := ValueCol + (strftime(date,"%u") %in% c(6,7) * runif(1) * 0.75), .I]
dat[, ValueCol := ValueCol + (abs(as.numeric(strftime(date,"%m")) - 6.5)) * runif(1) * 0.75, .I]
dat$Year<- as.integer(strftime(dat$date, '%Y')) #年份
dat$month <- as.integer(strftime(dat$date, '%m')) #月份
dat$week<- as.integer(strftime(dat$date, '%W')) #周数
MonthLabels <- dat[,list(meanWkofYr = mean(week)), by = c('month') ]
MonthLabels$month <-month.abb[MonthLabels$month]
ggplot(data=dat,aes(date=date,fill=ValueCol))+
stat_calendar_heatmap()+
scale_fill_gradientn(colours= rev(brewer.pal(11,'Spectral')))+
facet_wrap(~Year, ncol = 1,strip.position = "right")+
scale_y_continuous(breaks=seq(7, 1, -1),labels=c("Mon","Tue","Wed","Thu","Fri","Sat","Sun"))+
scale_x_continuous(breaks = MonthLabels[,meanWkofYr], labels = MonthLabels[, month],expand = c(0, 0)) +
xlab(NULL)+
ylab(NULL)+
theme( panel.background = element_blank(),
panel.border = element_rect(colour="grey60",fill=NA),
strip.background = element_blank(),
strip.text = element_text(size=13,face="plain",color="black"),
axis.line=element_line(colour="black",size=0.25),
axis.title=element_text(size=10,face="plain",color="black"),
axis.text = element_text(size=10,face="plain",color="black"))
#---------------------------------------------------------
library(dplyr)
dat17 <- filter(dat,Year==2017)[,c(1,2)]
dat17$month <- as.integer(strftime(dat17$date, '%m')) #月份
dat17$monthf<-factor(dat17$month,levels=as.character(1:12),
labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
dat17$weekday<-as.integer(strftime(dat17$date, '%u'))#周数
dat17$weekdayf<-factor(dat17$weekday,levels=(1:7),
labels=(c("Mon","Tue","Wed","Thu","Fri","Sat","Sun")),ordered=TRUE)
dat17$yearmonth<- strftime(dat17$date, '%m%Y') #月份
dat17$yearmonthf<-factor(dat17$yearmonth)
dat17$week<- as.integer(strftime(dat17$date, '%W'))#周数
dat17<-dat17 %>% group_by(monthf)%>%mutate(monthweek=1+week-min(week))
dat17$day<-strftime(dat17$date, "%d")
ggplot(dat17, aes(weekdayf, monthweek, fill=ValueCol)) +
geom_tile(colour = "white") +
scale_fill_gradientn(colours=rev(brewer.pal(11,'Spectral')))+
geom_text(aes(label=day),size=3)+
facet_wrap(~monthf ,nrow=3) +
scale_y_reverse()+
xlab("Day") + ylab("Week of the month") +
theme(strip.text = element_text(size=11,face="plain",color="black"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PhaseTypeGenetics.R
\docType{package}
\name{PhaseTypeGenetics}
\alias{PhaseTypeGenetics}
\alias{PhaseTypeGenetics-package}
\title{PhaseTypeGenetics: A package providing several properties of the phase-type
distribution with applications in population genetics}
\description{
The PhaseTypeGenetics package provides several functions to compute
properties of the discrete and continuous phase-type distribution.
These functions are
\itemize{
\item contphasetype
\item discphasetype
\item dphasetype
\item maxima
\item phmean
\item minima
\item moments
\item pphasetype
\item qphasetype
\item rphasetype
\item phsum
\item summary
\item phvar
}
Furthermore, the package includes some functions that are useful in
connection to population genetics. These functions are
\itemize{
\item BlockCountProcess
\item discretization
\item dSegregatingSites
\item RewTransDistribution
\item SiteFrequencies
}
The function \code{BlockCountProcess} computes the state space
and the corresponding rate matrix for
the block counting process for a given sample size \eqn{n} in
the standard coalescent model. The function \code{discretization}
discretizes a continuous phase-type distribution, and can for example
be used to obtain the distribution of the number of segregating sites,
when the mutation rate is known and equal to \eqn{\lambda}.
\code{dSegregatingSites} computes the distribution of the number
of segregating sites for a given sample size and a given mutation rate.
\code{RewTransDistribution} is an implementation of the reward
transformation and can be used to obtain the distribution of the
total branch length. Finally, the function \code{SiteFrequencies}
computes the distribution of the site frequencies, the number of
segregating sites and the tail statistics using phase-type theory.
}
\section{PhaseTypeGenetics general functions}{
\itemize{
\item \code{\link{contphasetype}}
\item \code{\link{discphasetype}}
\item \code{\link{dphasetype}}
\item \code{\link{maxima}}
\item \code{\link{phmean}}
\item \code{\link{minima}}
\item \code{\link{moments}}
\item \code{\link{pphasetype}}
\item \code{\link{qphasetype}}
\item \code{\link{rphasetype}}
\item \code{\link{phsum}}
\item \code{\link[=summary.discphasetype]{summary}}
\item \code{\link{phvar}}.
}
}
\section{PhaseTypeGenetics functions for applications in population genetics}{
\itemize{
\item \code{\link{BlockCountProcess}}
\item \code{\link{discretization}}
\item \code{\link{dSegregatingSites}}
\item \code{\link{RewTransDistribution}}
\item \code{\link{SiteFrequencies}}.
}
Note that the functions \code{\link{BlockCountProcess}}, \code{\link{SiteFrequencies}}
and \code{\link{dSegregatingSites}} make use of the package 'partitions'.
Make sure to install this package to be able to use the functions.
}
| /man/PhaseTypeGenetics.Rd | no_license | aumath-advancedr2019/PhaseTypeGenetics | R | false | true | 2,849 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PhaseTypeGenetics.R
\docType{package}
\name{PhaseTypeGenetics}
\alias{PhaseTypeGenetics}
\alias{PhaseTypeGenetics-package}
\title{PhaseTypeGenetics: A package providing several properties of the phase-type
distribution with applications in population genetics}
\description{
The PhaseTypeGenetics package provides several functions to compute
properties of the discrete and continuous phase-type distribution.
These functions are
\itemize{
\item contphasetype
\item discphasetype
\item dphasetype
\item maxima
\item phmean
\item minima
\item moments
\item pphasetype
\item qphasetype
\item rphasetype
\item phsum
\item summary
\item phvar
}
Furthermore, the package includes some functions that are useful in
connection to population genetics. These functions are
\itemize{
\item BlockCountProcess
\item discretization
\item dSegregatingSites
\item RewTransDistribution
\item SiteFrequencies
}
The function \code{BlockCountProcess} computes the state space
and the corresponding rate matrix for
the block counting process for a given sample size \eqn{n} in
the standard coalescent model. The function \code{discretization}
discretizes a continuous phase-type distribution, and can for example
be used to obtain the distribution of the number of segregating sites,
when the mutation rate is known and equal to \eqn{\lambda}.
\code{dSegregatingSites} computes the distribution of the number
of segregating sites for a given sample size and a given mutation rate.
\code{RewTransDistribution} is an implementation of the reward
transformation and can be used to obtain the distribution of the
total branch length. Finally, the function \code{SiteFrequencies}
computes the distribution of the site frequencies, the number of
segregating sites and the tail statistics using phase-type theory.
}
\section{PhaseTypeGenetics general functions}{
\itemize{
\item \code{\link{contphasetype}}
\item \code{\link{discphasetype}}
\item \code{\link{dphasetype}}
\item \code{\link{maxima}}
\item \code{\link{phmean}}
\item \code{\link{minima}}
\item \code{\link{moments}}
\item \code{\link{pphasetype}}
\item \code{\link{qphasetype}}
\item \code{\link{rphasetype}}
\item \code{\link{phsum}}
\item \code{\link[=summary.discphasetype]{summary}}
\item \code{\link{phvar}}.
}
}
\section{PhaseTypeGenetics functions for applications in population genetics}{
\itemize{
\item \code{\link{BlockCountProcess}}
\item \code{\link{discretization}}
\item \code{\link{dSegregatingSites}}
\item \code{\link{RewTransDistribution}}
\item \code{\link{SiteFrequencies}}.
}
Note that the functions \code{\link{BlockCountProcess}}, \code{\link{SiteFrequencies}}
and \code{\link{dSegregatingSites}} make use of the package 'partitions'.
Make sure to install this package to be able to use the functions.
}
|
#' Sample parameter vectors from the model's feasible region
#'
#' Draws uniformly distributed parameter vectors satisfying the model's
#' linear constraints via hit-and-run sampling.
#'
#' @param model A model object carrying `constraints` (with `dir`, `lhs`,
#'   `rhs` and `types` entries) and `perfToModelVariables`.
#' @param numberOfSamples Number of vectors to draw (positive).
#' @return The \code{hitandrun()} result: a matrix with one sampled
#'   parameter vector per row.
#' @import hitandrun
#' @export
sampleParameters <- function(model, numberOfSamples = 1000) {
  stopifnot(numberOfSamples > 0)
  # Sampling is only defined for purely continuous ("C") constraints.
  stopifnot(all(model$constraints$types == "C"))
  if (!isModelConsistent(model)) {
    stop("Model infeasible.")
  }
  model <- eliminateEpsilon(model)
  constraints <- model$constraints
  # hitandrun expects "=" rather than "==" ...
  constraints$dir[which(constraints$dir == "==")] <- "="
  # ... and only "<=" inequalities, so ">=" rows are negated on both sides
  # (vectorized; the original looped over the rows one by one).
  geq <- which(constraints$dir == ">=")
  constraints$rhs[geq] <- -1 * constraints$rhs[geq]
  constraints$lhs[geq, ] <- -1 * constraints$lhs[geq, ]
  constraints$dir[geq] <- "<="
  # Adapt the list shape for hitandrun: first element renamed to "constr",
  # fourth element (presumably the no-longer-needed types) dropped.
  names(constraints)[1] <- "constr"
  constraints[[4]] <- NULL
  hitandrun(constraints, n.samples = numberOfSamples,
            thin.fn = function(n) n^3)
}
#' Pairwise winning indices
#'
#' Estimates, for each ordered pair of alternatives (a, b), the fraction
#' of sampled parameter vectors under which alternative a obtains a
#' strictly better (smaller) rank than alternative b.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row
#'   (e.g. from \code{sampleParameters}).
#' @param accuracy Numerical tolerance forwarded to \code{getRanksFromF}.
#' @return A square numeric matrix p with p[a, b] in [0, 1].
#' @export
pwi <- function(model, samples, accuracy = 1e-16) {
  stopifnot(nrow(samples) > 0)
  nrAlternatives <- nrow(model$perfToModelVariables)
  model <- eliminateEpsilon(model)
  result <- matrix(data = 0, nrow = nrAlternatives, ncol = nrAlternatives)
  # Distinct index names: the original re-used `i` for both the sample loop
  # and the alternative loops, which obscured which index was live.
  for (s in seq_len(nrow(samples))) {
    # Smaller rank value means a better position.
    ranks <- getRanksFromF(model, samples[s, ], accuracy)
    # outer(...) is TRUE exactly where the row alternative beats the
    # column alternative; this replaces the two nested counting loops.
    result <- result + outer(ranks, ranks, `<`)
  }
  result / nrow(samples)
}
#' Rank acceptability indices
#'
#' Estimates, for each alternative and each rank position, the fraction
#' of sampled parameter vectors under which the alternative attains that
#' rank.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row.
#' @return A square numeric matrix r with r[a, k] = share of samples in
#'   which alternative a is ranked k-th.
#' @export
rai <- function(model, samples) {
  stopifnot(nrow(samples) > 0)
  nrAlternatives <- nrow(model$perfToModelVariables)
  model <- eliminateEpsilon(model)
  result <- matrix(data = 0, nrow = nrAlternatives, ncol = nrAlternatives)
  for (s in seq_len(nrow(samples))) {
    # NOTE(review): unlike pwi(), the ranks are computed with
    # getRanksFromF's default accuracy -- confirm this asymmetry is intended.
    ranks <- getRanksFromF(model, samples[s, ])
    # Each alternative contributes one count to the cell of its attained
    # rank; matrix indexing updates all alternatives in one step.
    cells <- cbind(seq_len(nrAlternatives), ranks)
    result[cells] <- result[cells] + 1
  }
  result / nrow(samples)
}
#' Mean coefficient of variation of the sampled criterion weights
#'
#' For every criterion the model variable holding its maximal marginal
#' value is selected (the last characteristic point for gain criteria,
#' the first one for cost criteria); the coefficient of variation
#' (sd / mean) of the corresponding column of `samples` is computed and
#' averaged over criteria.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row.
#' @return A single number: the average coefficient of variation.
#' @export
cv <- function(model, samples) {
  stopifnot(nrow(samples) > 0)
  model <- eliminateEpsilon(model)
  # Index of the variable carrying each criterion's weight (vapply replaces
  # the original grow-by-c() loop).
  weightIndices <- vapply(seq_along(model$chPoints), function(j) {
    if (model$criterionPreferenceDirection[j] == "g") {
      # Gain criterion: value at the last characteristic point.
      model$firstChPointVariableIndex[j] + model$chPoints[j] - 2
    } else { # cost criterion ("c"): value at the first characteristic point
      model$firstChPointVariableIndex[j]
    }
  }, numeric(1))
  # drop = FALSE keeps a one-column matrix when there is a single
  # criterion, so apply() below still iterates over columns.
  selectedColumns <- samples[, weightIndices, drop = FALSE]
  mean(apply(selectedColumns, 2, sd) / colMeans(selectedColumns))
}
| /R/sampling.R | no_license | kciomek/vfranking | R | false | false | 2,927 | r | #' @import hitandrun
#' Sample parameter vectors from the model's feasible region
#'
#' Draws uniformly distributed parameter vectors satisfying the model's
#' linear constraints via hit-and-run sampling.
#'
#' @param model A model object carrying `constraints` (with `dir`, `lhs`,
#'   `rhs` and `types` entries) and `perfToModelVariables`.
#' @param numberOfSamples Number of vectors to draw (positive).
#' @return The \code{hitandrun()} result: a matrix with one sampled
#'   parameter vector per row.
#' @export
sampleParameters <- function(model, numberOfSamples = 1000) {
  stopifnot(numberOfSamples > 0)
  # Sampling is only defined for purely continuous ("C") constraints.
  stopifnot(all(model$constraints$types == "C"))
  if (!isModelConsistent(model)) {
    stop("Model infeasible.")
  }
  model <- eliminateEpsilon(model)
  constraints <- model$constraints
  # hitandrun expects "=" rather than "==" ...
  constraints$dir[which(constraints$dir == "==")] <- "="
  # ... and only "<=" inequalities, so ">=" rows are negated on both sides
  # (vectorized; the original looped over the rows one by one).
  geq <- which(constraints$dir == ">=")
  constraints$rhs[geq] <- -1 * constraints$rhs[geq]
  constraints$lhs[geq, ] <- -1 * constraints$lhs[geq, ]
  constraints$dir[geq] <- "<="
  # Adapt the list shape for hitandrun: first element renamed to "constr",
  # fourth element (presumably the no-longer-needed types) dropped.
  names(constraints)[1] <- "constr"
  constraints[[4]] <- NULL
  hitandrun(constraints, n.samples = numberOfSamples,
            thin.fn = function(n) n^3)
}
#' Pairwise winning indices
#'
#' Estimates, for each ordered pair of alternatives (a, b), the fraction
#' of sampled parameter vectors under which alternative a obtains a
#' strictly better (smaller) rank than alternative b.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row
#'   (e.g. from \code{sampleParameters}).
#' @param accuracy Numerical tolerance forwarded to \code{getRanksFromF}.
#' @return A square numeric matrix p with p[a, b] in [0, 1].
#' @export
pwi <- function(model, samples, accuracy = 1e-16) {
  stopifnot(nrow(samples) > 0)
  nrAlternatives <- nrow(model$perfToModelVariables)
  model <- eliminateEpsilon(model)
  result <- matrix(data = 0, nrow = nrAlternatives, ncol = nrAlternatives)
  # Distinct index names: the original re-used `i` for both the sample loop
  # and the alternative loops, which obscured which index was live.
  for (s in seq_len(nrow(samples))) {
    # Smaller rank value means a better position.
    ranks <- getRanksFromF(model, samples[s, ], accuracy)
    # outer(...) is TRUE exactly where the row alternative beats the
    # column alternative; this replaces the two nested counting loops.
    result <- result + outer(ranks, ranks, `<`)
  }
  result / nrow(samples)
}
#' Rank acceptability indices
#'
#' Estimates, for each alternative and each rank position, the fraction
#' of sampled parameter vectors under which the alternative attains that
#' rank.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row.
#' @return A square numeric matrix r with r[a, k] = share of samples in
#'   which alternative a is ranked k-th.
#' @export
rai <- function(model, samples) {
  stopifnot(nrow(samples) > 0)
  nrAlternatives <- nrow(model$perfToModelVariables)
  model <- eliminateEpsilon(model)
  result <- matrix(data = 0, nrow = nrAlternatives, ncol = nrAlternatives)
  for (s in seq_len(nrow(samples))) {
    # NOTE(review): unlike pwi(), the ranks are computed with
    # getRanksFromF's default accuracy -- confirm this asymmetry is intended.
    ranks <- getRanksFromF(model, samples[s, ])
    # Each alternative contributes one count to the cell of its attained
    # rank; matrix indexing updates all alternatives in one step.
    cells <- cbind(seq_len(nrAlternatives), ranks)
    result[cells] <- result[cells] + 1
  }
  result / nrow(samples)
}
#' Mean coefficient of variation of the sampled criterion weights
#'
#' For every criterion the model variable holding its maximal marginal
#' value is selected (the last characteristic point for gain criteria,
#' the first one for cost criteria); the coefficient of variation
#' (sd / mean) of the corresponding column of `samples` is computed and
#' averaged over criteria.
#'
#' @param model A model object.
#' @param samples Matrix of sampled parameter vectors, one per row.
#' @return A single number: the average coefficient of variation.
#' @export
cv <- function(model, samples) {
  stopifnot(nrow(samples) > 0)
  model <- eliminateEpsilon(model)
  # Index of the variable carrying each criterion's weight (vapply replaces
  # the original grow-by-c() loop).
  weightIndices <- vapply(seq_along(model$chPoints), function(j) {
    if (model$criterionPreferenceDirection[j] == "g") {
      # Gain criterion: value at the last characteristic point.
      model$firstChPointVariableIndex[j] + model$chPoints[j] - 2
    } else { # cost criterion ("c"): value at the first characteristic point
      model$firstChPointVariableIndex[j]
    }
  }, numeric(1))
  # drop = FALSE keeps a one-column matrix when there is a single
  # criterion, so apply() below still iterates over columns.
  selectedColumns <- samples[, weightIndices, drop = FALSE]
  mean(apply(selectedColumns, 2, sd) / colMeans(selectedColumns))
}
|
# Scrape 2016 general-election results for Greenwood County, SC from the
# enr-scvotes.org election-night reporting pages and write one tidy CSV
# per precinct row.
#
# External dependency: phantomjs must be on the PATH. The md_data pages
# are rendered client-side, so a headless browser is used to dump the
# final HTML before rvest parses it.
library(tidyverse)
library(rvest)
library(stringr)

setwd("~/Dropbox/democrats/election-results-data/greenwood/2016")
county <- "greenwood"

# Turnout page is static; the contest pages (cid=0103..0106) need rendering.
turnouturl <- read_html("http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/vt_data.html")
sturl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0103&"
potusurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0104&"
senurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0105&"
houseurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0106&"

# Render a client-side page with phantomjs and return the parsed HTML.
# (Replaces four copy-pasted writeLines/system/read_html stanzas.)
fetch_rendered <- function(url, outfile) {
  writeLines(sprintf("var page = require('webpage').create();
page.open('%s', function () {
  console.log(page.content); //page source
  phantom.exit();
});", url), con = "scrape.js")
  system(sprintf("phantomjs scrape.js > %s", outfile))
  read_html(outfile)
}

# Pull the 4th table from a rendered contest page, drop its first column,
# rename, strip header/total rows, and convert comma-formatted counts.
# (Replaces the identical post-processing block repeated per contest.)
extract_results <- function(html, col_names) {
  tbl <- html_table(html_nodes(html, "table")[[4]], fill = TRUE)
  tbl[1] <- NULL
  names(tbl) <- col_names
  tbl <- filter(tbl, precinct != "Precinct" & precinct != "Total:")
  tbl[, 2:ncol(tbl)] <- apply(tbl[, 2:ncol(tbl)], 2,
                              function(x) as.numeric(gsub(',', '', x)))
  tbl
}

# Turnout is fairly easy.
# -----------------------
gwooturnout <- html_table(html_nodes(turnouturl, "table")[[4]])
names(gwooturnout) <- c("precinct", "bc", "rv", "vtp")
gwooturnout <- filter(gwooturnout, precinct != "Precinct" & precinct != "Total:")
gwooturnout$bc <- as.numeric(gsub(',', '', gwooturnout$bc))
gwooturnout$rv <- as.numeric(gsub(',', '', gwooturnout$rv))
gwooturnout$vtp <- with(gwooturnout, (bc / rv) * 100)

# Straight-ticket data.
# ---------------------
gwoost <- fetch_rendered(sturl, "gwoo-st.html") %>%
  extract_results(c("precinct", "dem", "wf", "constitution",
                    "independence", "green", "gop",
                    "american", "libertarian", "sttotal"))

# President data.
# ---------------
gwoopotus <- fetch_rendered(potusurl, "gwoo-potus.html") %>%
  extract_results(c("precinct", "clinton", "castle", "mcmullin",
                    "stein", "trump", "skewes",
                    "johnson", "potustotal"))

# Senate race data.
# -----------------
# The feed lists Dixon three times and Bledsoe twice, so the duplicate
# candidate columns are summed into a single column each below.
gwoosen <- fetch_rendered(senurl, "gwoo-senate.html") %>%
  extract_results(c("precinct", "dixon", "dixon2", "bledsoe",
                    "dixon3", "scott", "scarborough",
                    "bledsoe2", "senwritein", "sentotal"))
gwoosen$dixon <- with(gwoosen, dixon + dixon2 + dixon3)
gwoosen$dixon2 <- gwoosen$dixon3 <- NULL
gwoosen$bledsoe <- with(gwoosen, bledsoe + bledsoe2)
gwoosen$bledsoe2 <- NULL

# House race data.
# ----------------
gwoohouse <- fetch_rendered(houseurl, "gwoo-house.html") %>%
  extract_results(c("precinct", "cleveland", "duncan", "housewritein", "housetotal"))

# Join all contests by precinct and write the combined county file.
Greenwood <- left_join(gwooturnout, gwoost) %>%
  left_join(., gwoopotus) %>%
  left_join(., gwoosen) %>%
  left_join(., gwoohouse) %>%
  tbl_df() %>%
  mutate(county = county) %>%
  select(county, everything())
write_csv(Greenwood, "2016-ge-gwoo-total.csv")
| /greenwood/2016/1-scrape-election-results-greenwood.R | no_license | svmiller/upstate-election-data | R | false | false | 5,448 | r | library(tidyverse)
library(rvest)
library(stringr)
setwd("~/Dropbox/democrats/election-results-data/greenwood/2016")
county <- "greenwood"
turnouturl <- read_html("http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/vt_data.html")
sturl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0103&"
potusurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0104&"
senurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0105&"
houseurl <- "http://www.enr-scvotes.org/SC/Greenwood/64682/183626/en/md_data.html?cid=0106&"
# Turnout is fairly easy.
# -----------------------
gwooturnout = html_table(html_nodes(turnouturl, "table")[[4]])
names(gwooturnout) <- c("precinct","bc","rv","vtp")
gwooturnout %>%
filter(precinct != "Precinct" & precinct != "Total:") -> gwooturnout
gwooturnout$bc <- as.numeric(gsub(',', '', gwooturnout$bc))
gwooturnout$rv <- as.numeric(gsub(',', '', gwooturnout$rv))
gwooturnout$vtp <- with(gwooturnout, (bc/rv)*100)
#gwooturnout$county <- county
#gwooturnout <- gwooturnout[,c(5,1:4)]
# write_csv(gwooturnout, "2016-ge-gwooturnout.csv")
# Straight-ticket data.
# ---------------------
writeLines(sprintf("var page = require('webpage').create();
page.open('%s', function () {
console.log(page.content); //page source
phantom.exit();
});", sturl), con="scrape.js")
system("phantomjs scrape.js > gwoo-st.html")
gwoosturl <- read_html("gwoo-st.html")
gwoost <- html_table(html_nodes(gwoosturl, "table")[[4]],fill=TRUE)
gwoost[1] <- NULL
names(gwoost) <- c("precinct","dem","wf","constitution",
"independence","green","gop",
"american","libertarian","sttotal")
gwoost %>%
filter(precinct != "Precinct" & precinct != "Total:") -> gwoost
gwoost[, 2:ncol(gwoost)] <- apply(gwoost[,2:ncol(gwoost)], 2, function(x) as.numeric(gsub(',', '',x)))
# gwoost$county <- county
# gwoost <- gwoost[,c(11,1:10)]
# write_csv(gwoost, "2016-ge-gwoost.csv")
# President data.
# ---------------
writeLines(sprintf("var page = require('webpage').create();
page.open('%s', function () {
console.log(page.content); //page source
phantom.exit();
});", potusurl), con="scrape.js")
system("phantomjs scrape.js > gwoo-potus.html")
gwoopotusurl <- read_html("gwoo-potus.html")
gwoopotus <- html_table(html_nodes(gwoopotusurl, "table")[[4]],fill=TRUE)
gwoopotus[1] <- NULL
names(gwoopotus) <- c("precinct","clinton","castle","mcmullin",
"stein","trump","skewes",
"johnson","potustotal")
gwoopotus %>%
filter(precinct != "Precinct" & precinct != "Total:") -> gwoopotus
gwoopotus[, 2:ncol(gwoopotus)] <- apply(gwoopotus[,2:ncol(gwoopotus)], 2, function(x) as.numeric(gsub(',', '',x)))
# gwoopotus$county <- county
# gwoopotus <- gwoopotus[,c(10,1:9)]
# write_csv(gwoopotus, "2016-ge-gwoopotus.csv")
# Senate race data.
# -----------------
writeLines(sprintf("var page = require('webpage').create();
page.open('%s', function () {
console.log(page.content); //page source
phantom.exit();
});", senurl), con="scrape.js")
system("phantomjs scrape.js > gwoo-senate.html")
gwoosenurl <- read_html("gwoo-senate.html")
gwoosen <- html_table(html_nodes(gwoosenurl, "table")[[4]],fill=TRUE)
gwoosen[1] <- NULL
# Weirdly enough, Dixon is triple-counted and Bledsoe is double-counted. Fucking SC votes...
names(gwoosen) <- c("precinct","dixon","dixon2","bledsoe",
"dixon3","scott","scarborough",
"bledsoe2","senwritein","sentotal")
gwoosen %>%
filter(precinct != "Precinct" & precinct != "Total:") -> gwoosen
gwoosen[, 2:ncol(gwoosen)] <- apply(gwoosen[,2:ncol(gwoosen)], 2, function(x) as.numeric(gsub(',', '',x)))
gwoosen$dixon <- with(gwoosen, dixon+dixon2+dixon3)
gwoosen$dixon2 <- gwoosen$dixon3 <- NULL
gwoosen$bledsoe <- with(gwoosen, bledsoe + bledsoe2)
gwoosen$bledsoe2 <- NULL
# gwoosen$county <- county
# gwoosen <- gwoosen[,c(8,1:7)]
# write_csv(gwoosen, "2016-ge-gwoosen.csv")
# House race data.
# ----------------
writeLines(sprintf("var page = require('webpage').create();
page.open('%s', function () {
console.log(page.content); //page source
phantom.exit();
});", houseurl), con="scrape.js")
system("phantomjs scrape.js > gwoo-house.html")
gwoohouseurl <- read_html("gwoo-house.html")
gwoohouse <- html_table(html_nodes(gwoohouseurl, "table")[[4]],fill=TRUE)
gwoohouse[1] <- NULL
names(gwoohouse) <- c("precinct","cleveland","duncan", "housewritein","housetotal")
gwoohouse %>%
filter(precinct != "Precinct" & precinct != "Total:") -> gwoohouse
gwoohouse[, 2:ncol(gwoohouse)] <- apply(gwoohouse[,2:ncol(gwoohouse)], 2, function(x) as.numeric(gsub(',', '',x)))
# gwoohouse$county <- county
#gwoohouse <- gwoohouse %>%
# select(county,precinct:housetotal)
# write_csv(gwoohouse, "2016-ge-gwoohouse.csv")
Greenwood <- left_join(gwooturnout, gwoost) %>%
left_join(., gwoopotus) %>%
left_join(., gwoosen) %>%
left_join(., gwoohouse) %>%
tbl_df() %>%
mutate(county = county) %>%
select(county, everything())
write_csv(Greenwood, "2016-ge-gwoo-total.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ut_poly.r
\docType{data}
\name{ut_poly}
\alias{ut_poly}
\title{Utah state boundary, excluding tribal jurisdictions (unofficial)}
\format{
An sf type polygon shapefile
}
\usage{
ut_poly
}
\description{
Polygon showing unofficial boundaries of the State of Utah, excluding tribal jurisdictions. This is an unofficial representation used to screen sites for the integrated report only.
}
\keyword{datasets}
| /man/ut_poly.Rd | permissive | utah-dwq/wqTools | R | false | true | 482 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ut_poly.r
\docType{data}
\name{ut_poly}
\alias{ut_poly}
\title{Utah state boundary, excluding tribal jurisdictions (unofficial)}
\format{
An sf type polygon shapefile
}
\usage{
ut_poly
}
\description{
Polygon showing unofficial boundaries of the State of Utah, excluding tribal jurisdictions. This is an unofficial representation used to screen sites for the integrated report only.
}
\keyword{datasets}
|
library(httr)
library(httpuv)
library(jsonlite)
library(sqldf)
library(XML)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at
# https://github.com/settings/developers. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#93f6121b72ea9130d29c20470b8c878fe6456730 Personal access tokens
userName <- "Ecologo"
theToken <- "93f6121b72ea9130d29c20470b8c878fe6456730"
aRequest <- GET("https://api.github.com/users/jtleek/repos",authenticate(userName,theToken))
#
# code to parse out the datasharing content goes here
jsonReq <- fromJSON("https://api.github.com/users/jtleek/repos")
dfReq <- as.data.frame(cbind(jsonReq$name,jsonReq$created_at))
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv", destfile = "survey.csv")
acs <- read.csv("./survey.csv")
sqldf("select pwgtp1 from acs where AGEP < 50")
sqldf("select distinct AGEP from acs")
url <- "http://biostat.jhsph.edu/~jleek/contact.html"
charPerLine <- nchar(readLines(url))
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for", destfile = "noaaElNino.for")
noaaElNino <- read.fwf("./noaaElNino.for", header = FALSE,skip = 4 ,widths = c(28, 4))
| /Quiz2/week2_CourseraQuiz.R | no_license | Ecologo/datasciencecoursera | R | false | false | 1,332 | r |
library(httr)
library(httpuv)
library(jsonlite)
library(sqldf)
library(XML)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at
# https://github.com/settings/developers. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#93f6121b72ea9130d29c20470b8c878fe6456730 Personal access tokens
userName <- "Ecologo"
theToken <- "93f6121b72ea9130d29c20470b8c878fe6456730"
aRequest <- GET("https://api.github.com/users/jtleek/repos",authenticate(userName,theToken))
#
# code to parse out the datasharing content goes here
jsonReq <- fromJSON("https://api.github.com/users/jtleek/repos")
dfReq <- as.data.frame(cbind(jsonReq$name,jsonReq$created_at))
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv", destfile = "survey.csv")
acs <- read.csv("./survey.csv")
sqldf("select pwgtp1 from acs where AGEP < 50")
sqldf("select distinct AGEP from acs")
url <- "http://biostat.jhsph.edu/~jleek/contact.html"
charPerLine <- nchar(readLines(url))
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for", destfile = "noaaElNino.for")
noaaElNino <- read.fwf("./noaaElNino.for", header = FALSE,skip = 4 ,widths = c(28, 4))
|
# Auto-extracted example script for the koRpus.lang.en package man page.
library(koRpus.lang.en)
### Name: lang.support.en
### Title: Language support for English
### Aliases: lang.support.en
### ** Examples
# Example call from the man page (presumably registers English support).
lang.support.en()
| /data/genthat_extracted_code/koRpus.lang.en/examples/lang.support.en.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 160 | r | library(koRpus.lang.en)
### Name: lang.support.en
### Title: Language support for English
### Aliases: lang.support.en
### ** Examples
# Example call from the man page (presumably registers English support).
lang.support.en()
|
#' @rdname authentication
#' @export
twitch_auth_forget = function(){ httr::reset_config() } | /R/twitch_auth_forget.R | permissive | praster1/igdbV4 | R | false | false | 92 | r | #' @rdname authentication
#' @export
twitch_auth_forget = function(){ httr::reset_config() } |
###
### The gauge module renders a gauge with a title and statistics below the gauge
###
piesModuleUI <- function(id){
  # UI half of the pies module: a single namespaced placeholder that the
  # server half fills in through output$Pies.
  tagList(uiOutput(NS(id, "Pies")))
}
# module server
# Server half of the pies module. Renders four C3 pie charts from the
# reactive `moduleData()` list and returns a reactive holding the current
# state of the four chart inputs (NULL until the charts exist client-side).
piesModule <- function(input, output, session, moduleData){
  ns <- session$ns

  #### Render pies ----
  output$Pies <- renderUI({
    # One bootstrap column per chart; extra arguments (e.g. an overflow
    # style) are forwarded to column().
    pieColumn <- function(title, id, ...) {
      column(3, ...,
             h4(title, style = "text-align:center"),
             FrissC3PieChartOutput(ns(id)))
    }
    list(
      br(),
      fluidRow(
        pieColumn("Process", "PieProcess"),
        pieColumn("Label", "PieLabel"),
        pieColumn("Product", "PieProduct"),
        pieColumn("Branch", "PieBranch", style = "overflow: hidden;")
      )
    )
  })

  # Shared width for all four pie charts.
  pieWidth <- "50%"

  output$PieProcess <- renderFrissC3PieChart({
    pieData <- moduleData()
    # NOTE(review): `dataHidden = diff` resolves to base::diff unless a
    # variable named diff exists at render time -- confirm this is intended.
    FrissC3PieChart(pieData$procesCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right', dataHidden = diff)
  })
  output$PieLabel <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$labelCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })
  output$PieProduct <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$productCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })
  output$PieBranch <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$brancheCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })

  returnItems <- reactive({
    # Until the filter app has fully loaded, the chart inputs do not exist;
    # returning NULL avoids an unnecessary downstream trigger.
    if (is.null(input$PieProcess)) {
      NULL
    } else {
      list(PieProcess = input$PieProcess,
           PieLabel = input$PieLabel,
           PieProduct = input$PieProduct,
           PieBranch = input$PieBranch)
    }
  })
  returnItems
}
| /Shiny_Tools/dashboard_showcase-master/app/modules/piesModule.R | no_license | DannyJRa/Shiny_Tools | R | false | false | 2,248 | r | ###
### The gauge module renders a gauge with a title and statistics below the gauge
###
piesModuleUI <- function(id){
  # UI half of the pies module: a single namespaced placeholder that the
  # server half fills in through output$Pies.
  tagList(uiOutput(NS(id, "Pies")))
}
# module server
# Server half of the pies module. Renders four C3 pie charts from the
# reactive `moduleData()` list and returns a reactive holding the current
# state of the four chart inputs (NULL until the charts exist client-side).
piesModule <- function(input, output, session, moduleData){
  ns <- session$ns

  #### Render pies ----
  output$Pies <- renderUI({
    # One bootstrap column per chart; extra arguments (e.g. an overflow
    # style) are forwarded to column().
    pieColumn <- function(title, id, ...) {
      column(3, ...,
             h4(title, style = "text-align:center"),
             FrissC3PieChartOutput(ns(id)))
    }
    list(
      br(),
      fluidRow(
        pieColumn("Process", "PieProcess"),
        pieColumn("Label", "PieLabel"),
        pieColumn("Product", "PieProduct"),
        pieColumn("Branch", "PieBranch", style = "overflow: hidden;")
      )
    )
  })

  # Shared width for all four pie charts.
  pieWidth <- "50%"

  output$PieProcess <- renderFrissC3PieChart({
    pieData <- moduleData()
    # NOTE(review): `dataHidden = diff` resolves to base::diff unless a
    # variable named diff exists at render time -- confirm this is intended.
    FrissC3PieChart(pieData$procesCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right', dataHidden = diff)
  })
  output$PieLabel <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$labelCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })
  output$PieProduct <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$productCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })
  output$PieBranch <- renderFrissC3PieChart({
    pieData <- moduleData()
    FrissC3PieChart(pieData$brancheCounts, height = 250, width = pieWidth,
                    maxGroups = 5, legendPosition = 'right')
  })

  returnItems <- reactive({
    # Until the filter app has fully loaded, the chart inputs do not exist;
    # returning NULL avoids an unnecessary downstream trigger.
    if (is.null(input$PieProcess)) {
      NULL
    } else {
      list(PieProcess = input$PieProcess,
           PieLabel = input$PieLabel,
           PieProduct = input$PieProduct,
           PieBranch = input$PieBranch)
    }
  })
  returnItems
}
|
# Register names that appear inside OpenMx/ctsem model algebra and other
# non-standard-evaluation contexts so that R CMD check does not flag them
# as undefined global variables.
utils::globalVariables(c("invDRIFT","II","DRIFTexp","vec2diag","diag2vec",
"mxData","mxMatrix","mxAlgebra","MANIFESTVARbase","MANIFESTVARcholdiag",
"MANIFESTVARchol","T0VARbase","T0VARcholdiag","T0VARchol","DIFFUSIONbase",
"DIFFUSIONcholdiag","DIFFUSIONchol","invDRIFTHATCH","cvectorize","DRIFTHATCH",
"TRAITVARbase","TRAITVARcholdiag","TRAITVARchol","MANIFESTTRAITVARbase",
"MANIFESTTRAITVARcholdiag","MANIFESTTRAITVARchol","mxComputeSequence",
"mxComputeGradientDescent","mxComputeReportDeriv","TDPREDVARbase",
"TDPREDVARcholdiag","TDPREDVARchol","TIPREDVARbase","TIPREDVARcholdiag",
"TIPREDVARchol","mxExpectationRAM","mxFitFunctionML","Ilatent","Alatent",
"Amanifestcov","invIminusAlatent","Smanifest","Amanifest","Mmanifest",
"mxExpectationNormal","omxSelectRowsAndCols","expCov","existenceVector",
"omxSelectCols","expMean","log2pi","numVar_i","filteredExpCov","%&%",
"filteredDataRow","filteredExpMean","firstHalfCalc","secondHalfCalc",
"rowResults","mxFitFunctionRow","TdpredNames","discreteCINT_T1","discreteDRIFT_T1",
"discreteDIFFUSION_T1","mxExpectationStateSpace","mxExpectationSSCT","ctsem.fitfunction",
"ctsem.penalties","FIMLpenaltyweight","ctsem.simpleDynPenalty","ieigenval",
"mxFitFunctionAlgebra","mxCI","mxComputeConfidenceInterval","DRIFT",
"n.latent","DIFFUSION","TRAITVAR","n.TDpred","TDPREDEFFECT","TDPREDMEANS",
"TDPREDVAR","TRAITTDPREDCOV","n.TIpred","TIPREDEFFECT","TIPREDMEANS",
"TIPREDVAR","CINT","n.manifest","LAMBDA","MANIFESTMEANS","MANIFESTVAR",
"mxFitFunctionMultigroup", "asymDIFFUSION", 'data.id',
'filteredExpCovchol','filteredExpCovcholinv',
'A','M','testd',
'T0VAR','T0MEANS', 'MANIFESTTRAITVAR',
'TDpredNames', 'TIpredNames', 'Tpoints', 'extract', 'latentNames', 'manifestNames',
'plot', 'points','T0TRAITEFFECT',
'T0VARsubindex','DRIFTsubindex','DIFFUSIONsubindex','CINTsubindex'))
#' ctsem
#'
#' ctsem is an R package for continuous time structural equation modelling of panel (N > 1)
#' and time series (N = 1) data, using either a frequentist or Bayesian approach.
#' The frequentist approach is faster but can only estimate random-effects on the intercepts,
#' while the Bayesian approach allows for random-effects across all model parameters.
#'
#' The general workflow begins by specifying a model using the \code{\link{ctModel}} function,
#' in which the \code{type} of model is also specified. Then the model is fit to data using
#' either \code{\link{ctFit}} if an 'omx' (OpenMx, frequentist) model is specified or
#' \code{\link{ctStanFit}} if a 'stanct' or 'standt' (Stan, continuous / discrete time, Bayesian)
#' model is specified.
#' For examples, see either \code{\link{ctFit}} or \code{\link{ctStanFit}}.
#' For more detailed information, see the frequentist vignette by running: \code{vignette('ctsem')}
#' For citation info, please run \code{citation('ctsem')} .
#'
#' @docType package
#' @name ctsem
#' @import grDevices methods stats rstan OpenMx graphics plyr rstantools rstan Rcpp
#' @importFrom utils relist as.relistable tail capture.output
#' @useDynLib ctsem, .registration = TRUE
#'
#' @references
#' https://www.jstatsoft.org/article/view/v077i05
#'
#' Driver, C. C., & Voelkle, M. C. (2018). Hierarchical Bayesian continuous time dynamic modeling.
#' Psychological Methods. Advance online publication. http://dx.doi.org/10.1037/met0000168
#'
#' Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.17.3. http://mc-stan.org
#'
NULL
| /R/ctsem.R | no_license | AndreMikulec/ctsem | R | false | false | 3,503 | r | utils::globalVariables(c("invDRIFT","II","DRIFTexp","vec2diag","diag2vec",
"mxData","mxMatrix","mxAlgebra","MANIFESTVARbase","MANIFESTVARcholdiag",
"MANIFESTVARchol","T0VARbase","T0VARcholdiag","T0VARchol","DIFFUSIONbase",
"DIFFUSIONcholdiag","DIFFUSIONchol","invDRIFTHATCH","cvectorize","DRIFTHATCH",
"TRAITVARbase","TRAITVARcholdiag","TRAITVARchol","MANIFESTTRAITVARbase",
"MANIFESTTRAITVARcholdiag","MANIFESTTRAITVARchol","mxComputeSequence",
"mxComputeGradientDescent","mxComputeReportDeriv","TDPREDVARbase",
"TDPREDVARcholdiag","TDPREDVARchol","TIPREDVARbase","TIPREDVARcholdiag",
"TIPREDVARchol","mxExpectationRAM","mxFitFunctionML","Ilatent","Alatent",
"Amanifestcov","invIminusAlatent","Smanifest","Amanifest","Mmanifest",
"mxExpectationNormal","omxSelectRowsAndCols","expCov","existenceVector",
"omxSelectCols","expMean","log2pi","numVar_i","filteredExpCov","%&%",
"filteredDataRow","filteredExpMean","firstHalfCalc","secondHalfCalc",
"rowResults","mxFitFunctionRow","TdpredNames","discreteCINT_T1","discreteDRIFT_T1",
"discreteDIFFUSION_T1","mxExpectationStateSpace","mxExpectationSSCT","ctsem.fitfunction",
"ctsem.penalties","FIMLpenaltyweight","ctsem.simpleDynPenalty","ieigenval",
"mxFitFunctionAlgebra","mxCI","mxComputeConfidenceInterval","DRIFT",
"n.latent","DIFFUSION","TRAITVAR","n.TDpred","TDPREDEFFECT","TDPREDMEANS",
"TDPREDVAR","TRAITTDPREDCOV","n.TIpred","TIPREDEFFECT","TIPREDMEANS",
"TIPREDVAR","CINT","n.manifest","LAMBDA","MANIFESTMEANS","MANIFESTVAR",
"mxFitFunctionMultigroup", "asymDIFFUSION", 'data.id',
'filteredExpCovchol','filteredExpCovcholinv',
'A','M','testd',
'T0VAR','T0MEANS', 'MANIFESTTRAITVAR',
'TDpredNames', 'TIpredNames', 'Tpoints', 'extract', 'latentNames', 'manifestNames',
'plot', 'points','T0TRAITEFFECT',
'T0VARsubindex','DRIFTsubindex','DIFFUSIONsubindex','CINTsubindex'))
#' ctsem
#'
#' ctsem is an R package for continuous time structural equation modelling of panel (N > 1)
#' and time series (N = 1) data, using either a frequentist or Bayesian approach.
#' The frequentist approach is faster but can only estimate random-effects on the intercepts,
#' while the Bayesian approach allows for random-effects across all model parameters.
#'
#' The general workflow begins by specifying a model using the \code{\link{ctModel}} function,
#' in which the \code{type} of model is also specified. Then the model is fit to data using
#' either \code{\link{ctFit}} if an 'omx' (OpenMx, frequentist) model is specified or
#' \code{\link{ctStanFit}} if a 'stanct' or 'standt' (Stan, continuous / discrete time, Bayesian)
#' model is specified.
#' For examples, see either \code{\link{ctFit}} or \code{\link{ctStanFit}}.
#' For more detailed information, see the frequentist vignette by running: \code{vignette('ctsem')}
#' For citation info, please run \code{citation('ctsem')} .
#'
#' @docType package
#' @name ctsem
#' @import grDevices methods stats rstan OpenMx graphics plyr rstantools rstan Rcpp
#' @importFrom utils relist as.relistable tail capture.output
#' @useDynLib ctsem, .registration = TRUE
#'
#' @references
#' https://www.jstatsoft.org/article/view/v077i05
#'
#' Driver, C. C., & Voelkle, M. C. (2018). Hierarchical Bayesian continuous time dynamic modeling.
#' Psychological Methods. Advance online publication.http://dx.doi.org/10.1037/met0000168
#'
#' Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.17.3. http://mc-stan.org
#'
NULL
|
# lookup tables are downloaded from
# https://www.census.gov/programs-surveys/acs/technical-documentation/summary-file-documentation.2017.html
# more general, the lookup table are located in usertools subdirectory of ftp sites, for old years.
# https://www2.census.gov/programs-surveys/acs/summary_file/2010/documentation/5_year/user_tools/
# some named differently
# https://www2.census.gov/programs-surveys/acs/summary_file/2009/documentation/1_year/user_tools/merge_5_6.txt
# Appendices are available from 2013. Download them from
# https://www.census.gov/programs-surveys/acs/technical-documentation/summary-file-documentation.2017.html
# ignore if not available
library(data.table)
library(magrittr)
library(readxl)
library(stringr)
library(totalcensus)
make_acs_lookup <- function(period, year){
  # Build the ACS sequence/table-number lookup for one survey release and, as
  # a side effect, save it to data/lookup_acs<period>year_<year>.RData.
  # period : 1 or 5 year
  # year : year of the survey
  # Returns the assembled lookup table, or NULL (with a message) when the raw
  # lookup file has not been downloaded into data_raw/acs/.

  # The lookup ships as .txt for some releases and as .xls for others; try the
  # plain-text version first, then fall back to the spreadsheet.
  file_lookup <- paste0(
    "data_raw/acs/",
    "ACS_", period, "yr_Seq_Table_Number_Lookup_", year, ".txt"
  )
  file_lookup_xls <- paste0(
    "data_raw/acs/",
    "ACS_", period, "yr_Seq_Table_Number_Lookup_", year, ".xls"
  )
  if (file.exists(file_lookup)){
    dt <- fread(file_lookup, colClasses = "character", encoding = "Latin-1")
  } else if (file.exists(file_lookup_xls)) {
    dt <- read_excel(file_lookup_xls, col_types = "text")
  } else {
    message("Please download the file sequence/table number lookup file in .txt or .xls format")
    return(NULL)
  }
  if (year == 2005){
    # 2005 is a special case: references and table contents must be
    # reconstructed from the separate "table shells" workbook.
    table_shell <- read_excel("data_raw/acs/ACS_tables_Sum_file_shells_2005_1yr.xls", col_types = "text") %>%
      .[, 1:4] %>%
      setDT() %>%
      setnames(c("table_number", "table_seq", "table_content", "file_segment")) %>%
      # drop rows without a sequence number and fractional sequence numbers
      .[!is.na(table_seq) & !str_detect(table_seq, "\\.")] %>%
      # zero-pad the sequence into a three-digit reference; the three
      # overlapping assignments progressively overwrite so the final padding
      # is correct for every magnitude
      .[as.integer(table_seq) < 1000, reference := paste0(table_number, "_", table_seq)] %>%
      .[as.integer(table_seq) < 100, reference := paste0(table_number, "_","0", table_seq)] %>%
      .[as.integer(table_seq) < 10, reference := paste0(table_number, "_","00", table_seq)] %>%
      .[, table_seq := NULL]
    dt <- setDT(dt) %>%
      .[, c(2, 3, 7), with = FALSE] %>%
      setnames(c("table_number", "file_segment", "table_name"))
    # rows with a table number carry the table name; the interleaved NA rows
    # carry the "Universe: ..." line -- assumed to pair up one-to-one
    # (TODO confirm this holds for the 2005 raw file layout)
    name <- dt[!is.na(table_number)]
    universe <- dt[is.na(table_number), .(universe = table_name)] %>%
      .[, universe := str_remove(universe, "^Universe: ")]
    dt <- cbind(name, universe)
    # a table content may appear in multiple file segment, join with file_seg too
    lookup_acs1year_2005 <- dt[table_shell, on = .(table_number, file_segment)] %>%
      .[, restriction := "unknown"] %>%
      setcolorder(c("file_segment", "table_content", "reference", "restriction",
                    "table_number", "table_name", "universe"))
    save(lookup_acs1year_2005, file = "data/lookup_acs1year_2005.RData",
         compress = "xz", compression_level = 9)
    return(lookup_acs1year_2005)
  } else {
    # all later releases: keep table number, file segment, reference and name
    dt <- setDT(dt) %>%
      .[, c(2, 3, 4, 8), with = FALSE] %>%
      setnames(1:4, c("table_number", "file_segment", "reference", "table_name"))
  }
  # the second row within each table block holds the universe description
  univ <- dt[, .SD[2], by = .(table_number)] %>%
    .[, .(table_number, universe = table_name)] %>%
    setkey(table_number)
  # some lookup file contain reference like "0.5", "2.7" that are not in the
  # raw data, remove them otherwise will cause column problem.
  # in 2007 acs1year, reference of table_content == Universe: xxxx is not
  # empty but "0"
  content <- dt[reference != "" & reference != "0" & !grepl("\\.", reference)] %>%
    setnames("table_name", "table_content") %>%
    # change the reference from 1, 12, ... to 001, 012, ...
    .[str_length(reference) == 1, reference := paste0("00", reference)] %>%
    .[str_length(reference) == 2, reference := paste0("0", reference)] %>%
    .[, reference := paste0(table_number, "_", reference)] %>%
    # the 1-year 2014 lookup file has 1, 2, 3, ... as file_segment
    # change to 0001, 0002, ...
    .[str_length(file_segment) == 1, file_segment := paste0("000", file_segment)] %>%
    .[str_length(file_segment) == 2, file_segment := paste0("00", file_segment)] %>%
    .[str_length(file_segment) == 3, file_segment := paste0("0", file_segment)] %>%
    setkey(table_number)
  # Appendices.xls files are not available for years earlier than 2013
  if (year >= 2013){
    file_restriction <- paste0(
      "data_raw/acs/",
      "ACS_", year, "_SF_", period, "YR_Appendices.xls"
    )
    restrict <- read_excel(file_restriction) %>%
      .[, c(1, 3)] %>%
      setDT() %>%
      setnames(c("table_number", "restriction")) %>%
      unique()
  } else {
    # make a fake restriction (unknown restriction)
    restrict <- dt[, .(table_number)] %>%
      .[, restriction := "unknown"] %>%
      unique()
  }
  # the first row of each table block holds the table name; attach restrictions
  tabl <- dt[, .SD[1], by = .(table_number)] %>%
    .[, .(table_number, table_name)] %>%
    restrict[., on = .(table_number)] %>%
    setkey(table_number)
  # assemble the final dictionary: contents + table names + universes
  dict <- tabl[content] %>%
    univ[.] %>%
    setcolorder(c("file_segment", "table_content", "reference", "restriction",
                  "table_number", "table_name", "universe")) %>%
    .[order(file_segment)]
  # if (year == 2005){
  #   # use 2006 table name to replace 2005 table name
  #   L2006 <- lookup_acs1year_2006[, .(reference, table_name)]
  # }
  # save to R/data/ under a release-specific object name
  dict_name <- paste0("lookup_acs", period, "year_", year)
  assign(dict_name, dict)
  save_as <- paste0("data/lookup_acs", period, "year_", year, ".RData" )
  save(list = dict_name, file = save_as,
       compress = "xz", compression_level = 9)
  return(dict)
}
# Build every published lookup. Each call also writes
# data/lookup_acs<period>year_<year>.RData as a side effect and returns NULL
# (with a message) when the corresponding raw file is missing.
# ACS 5-year
lookup_acs5year_2017 <- make_acs_lookup(5, 2017)
lookup_acs5year_2016 <- make_acs_lookup(5, 2016)
lookup_acs5year_2015 <- make_acs_lookup(5, 2015)
lookup_acs5year_2014 <- make_acs_lookup(5, 2014)
lookup_acs5year_2013 <- make_acs_lookup(5, 2013)
lookup_acs5year_2012 <- make_acs_lookup(5, 2012)
lookup_acs5year_2011 <- make_acs_lookup(5, 2011)
lookup_acs5year_2010 <- make_acs_lookup(5, 2010)
lookup_acs5year_2009 <- make_acs_lookup(5, 2009)
# ACS 1-year
lookup_acs1year_2017 <- make_acs_lookup(1, 2017)
lookup_acs1year_2016 <- make_acs_lookup(1, 2016)
lookup_acs1year_2015 <- make_acs_lookup(1, 2015)
lookup_acs1year_2014 <- make_acs_lookup(1, 2014)
lookup_acs1year_2013 <- make_acs_lookup(1, 2013)
lookup_acs1year_2012 <- make_acs_lookup(1, 2012)
lookup_acs1year_2011 <- make_acs_lookup(1, 2011)
lookup_acs1year_2010 <- make_acs_lookup(1, 2010)
lookup_acs1year_2009 <- make_acs_lookup(1, 2009)
lookup_acs1year_2008 <- make_acs_lookup(1, 2008)
lookup_acs1year_2007 <- make_acs_lookup(1, 2007)
lookup_acs1year_2006 <- make_acs_lookup(1, 2006)
lookup_acs1year_2005 <- make_acs_lookup(1, 2005)
# The explicit save() calls below are kept for reference only: saving now
# happens inside make_acs_lookup().
# # save to data/
# save(lookup_acs5year_2016, file = "data/lookup_acs5year_2016.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs5year_2015, file = "data/lookup_acs5year_2015.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs5year_2010, file = "data/lookup_acs5year_2010.RData",
# compress = "xz", compression_level = 9)
#
# save(lookup_acs1year_2017, file = "data/lookup_acs1year_2017.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2016, file = "data/lookup_acs1year_2016.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2015, file = "data/lookup_acs1year_2015.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2014, file = "data/lookup_acs1year_2014.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2010, file = "data/lookup_acs1year_2010.RData",
# compress = "xz", compression_level = 9)
| /data_raw/acs/create_acs_file_table_lookup.R | no_license | RealCoChenchao/totalcensus | R | false | false | 8,024 | r | # lookup tables are downloaded from
# https://www.census.gov/programs-surveys/acs/technical-documentation/summary-file-documentation.2017.html
# more general, the lookup table are located in usertools subdirectory of ftp sites, for old years.
# https://www2.census.gov/programs-surveys/acs/summary_file/2010/documentation/5_year/user_tools/
# some named differently
# https://www2.census.gov/programs-surveys/acs/summary_file/2009/documentation/1_year/user_tools/merge_5_6.txt
# Appendices are available from 2013. Download them from
# https://www.census.gov/programs-surveys/acs/technical-documentation/summary-file-documentation.2017.html
# ignore if not available
library(data.table)
library(magrittr)
library(readxl)
library(stringr)
library(totalcensus)
make_acs_lookup <- function(period, year){
  # Build the ACS sequence/table-number lookup for one survey release and, as
  # a side effect, save it to data/lookup_acs<period>year_<year>.RData.
  # period : 1 or 5 year
  # year : year of the survey
  # Returns the assembled lookup table, or NULL (with a message) when the raw
  # lookup file has not been downloaded into data_raw/acs/.

  # The lookup ships as .txt for some releases and as .xls for others; try the
  # plain-text version first, then fall back to the spreadsheet.
  file_lookup <- paste0(
    "data_raw/acs/",
    "ACS_", period, "yr_Seq_Table_Number_Lookup_", year, ".txt"
  )
  file_lookup_xls <- paste0(
    "data_raw/acs/",
    "ACS_", period, "yr_Seq_Table_Number_Lookup_", year, ".xls"
  )
  if (file.exists(file_lookup)){
    dt <- fread(file_lookup, colClasses = "character", encoding = "Latin-1")
  } else if (file.exists(file_lookup_xls)) {
    dt <- read_excel(file_lookup_xls, col_types = "text")
  } else {
    message("Please download the file sequence/table number lookup file in .txt or .xls format")
    return(NULL)
  }
  if (year == 2005){
    # 2005 is a special case: references and table contents must be
    # reconstructed from the separate "table shells" workbook.
    table_shell <- read_excel("data_raw/acs/ACS_tables_Sum_file_shells_2005_1yr.xls", col_types = "text") %>%
      .[, 1:4] %>%
      setDT() %>%
      setnames(c("table_number", "table_seq", "table_content", "file_segment")) %>%
      # drop rows without a sequence number and fractional sequence numbers
      .[!is.na(table_seq) & !str_detect(table_seq, "\\.")] %>%
      # zero-pad the sequence into a three-digit reference; the three
      # overlapping assignments progressively overwrite so the final padding
      # is correct for every magnitude
      .[as.integer(table_seq) < 1000, reference := paste0(table_number, "_", table_seq)] %>%
      .[as.integer(table_seq) < 100, reference := paste0(table_number, "_","0", table_seq)] %>%
      .[as.integer(table_seq) < 10, reference := paste0(table_number, "_","00", table_seq)] %>%
      .[, table_seq := NULL]
    dt <- setDT(dt) %>%
      .[, c(2, 3, 7), with = FALSE] %>%
      setnames(c("table_number", "file_segment", "table_name"))
    # rows with a table number carry the table name; the interleaved NA rows
    # carry the "Universe: ..." line -- assumed to pair up one-to-one
    # (TODO confirm this holds for the 2005 raw file layout)
    name <- dt[!is.na(table_number)]
    universe <- dt[is.na(table_number), .(universe = table_name)] %>%
      .[, universe := str_remove(universe, "^Universe: ")]
    dt <- cbind(name, universe)
    # a table content may appear in multiple file segment, join with file_seg too
    lookup_acs1year_2005 <- dt[table_shell, on = .(table_number, file_segment)] %>%
      .[, restriction := "unknown"] %>%
      setcolorder(c("file_segment", "table_content", "reference", "restriction",
                    "table_number", "table_name", "universe"))
    save(lookup_acs1year_2005, file = "data/lookup_acs1year_2005.RData",
         compress = "xz", compression_level = 9)
    return(lookup_acs1year_2005)
  } else {
    # all later releases: keep table number, file segment, reference and name
    dt <- setDT(dt) %>%
      .[, c(2, 3, 4, 8), with = FALSE] %>%
      setnames(1:4, c("table_number", "file_segment", "reference", "table_name"))
  }
  # the second row within each table block holds the universe description
  univ <- dt[, .SD[2], by = .(table_number)] %>%
    .[, .(table_number, universe = table_name)] %>%
    setkey(table_number)
  # some lookup file contain reference like "0.5", "2.7" that are not in the
  # raw data, remove them otherwise will cause column problem.
  # in 2007 acs1year, reference of table_content == Universe: xxxx is not
  # empty but "0"
  content <- dt[reference != "" & reference != "0" & !grepl("\\.", reference)] %>%
    setnames("table_name", "table_content") %>%
    # change the reference from 1, 12, ... to 001, 012, ...
    .[str_length(reference) == 1, reference := paste0("00", reference)] %>%
    .[str_length(reference) == 2, reference := paste0("0", reference)] %>%
    .[, reference := paste0(table_number, "_", reference)] %>%
    # the 1-year 2014 lookup file has 1, 2, 3, ... as file_segment
    # change to 0001, 0002, ...
    .[str_length(file_segment) == 1, file_segment := paste0("000", file_segment)] %>%
    .[str_length(file_segment) == 2, file_segment := paste0("00", file_segment)] %>%
    .[str_length(file_segment) == 3, file_segment := paste0("0", file_segment)] %>%
    setkey(table_number)
  # Appendices.xls files are not available for years earlier than 2013
  if (year >= 2013){
    file_restriction <- paste0(
      "data_raw/acs/",
      "ACS_", year, "_SF_", period, "YR_Appendices.xls"
    )
    restrict <- read_excel(file_restriction) %>%
      .[, c(1, 3)] %>%
      setDT() %>%
      setnames(c("table_number", "restriction")) %>%
      unique()
  } else {
    # make a fake restriction (unknown restriction)
    restrict <- dt[, .(table_number)] %>%
      .[, restriction := "unknown"] %>%
      unique()
  }
  # the first row of each table block holds the table name; attach restrictions
  tabl <- dt[, .SD[1], by = .(table_number)] %>%
    .[, .(table_number, table_name)] %>%
    restrict[., on = .(table_number)] %>%
    setkey(table_number)
  # assemble the final dictionary: contents + table names + universes
  dict <- tabl[content] %>%
    univ[.] %>%
    setcolorder(c("file_segment", "table_content", "reference", "restriction",
                  "table_number", "table_name", "universe")) %>%
    .[order(file_segment)]
  # if (year == 2005){
  #   # use 2006 table name to replace 2005 table name
  #   L2006 <- lookup_acs1year_2006[, .(reference, table_name)]
  # }
  # save to R/data/ under a release-specific object name
  dict_name <- paste0("lookup_acs", period, "year_", year)
  assign(dict_name, dict)
  save_as <- paste0("data/lookup_acs", period, "year_", year, ".RData" )
  save(list = dict_name, file = save_as,
       compress = "xz", compression_level = 9)
  return(dict)
}
# Build every published lookup. Each call also writes
# data/lookup_acs<period>year_<year>.RData as a side effect and returns NULL
# (with a message) when the corresponding raw file is missing.
# ACS 5-year
lookup_acs5year_2017 <- make_acs_lookup(5, 2017)
lookup_acs5year_2016 <- make_acs_lookup(5, 2016)
lookup_acs5year_2015 <- make_acs_lookup(5, 2015)
lookup_acs5year_2014 <- make_acs_lookup(5, 2014)
lookup_acs5year_2013 <- make_acs_lookup(5, 2013)
lookup_acs5year_2012 <- make_acs_lookup(5, 2012)
lookup_acs5year_2011 <- make_acs_lookup(5, 2011)
lookup_acs5year_2010 <- make_acs_lookup(5, 2010)
lookup_acs5year_2009 <- make_acs_lookup(5, 2009)
# ACS 1-year
lookup_acs1year_2017 <- make_acs_lookup(1, 2017)
lookup_acs1year_2016 <- make_acs_lookup(1, 2016)
lookup_acs1year_2015 <- make_acs_lookup(1, 2015)
lookup_acs1year_2014 <- make_acs_lookup(1, 2014)
lookup_acs1year_2013 <- make_acs_lookup(1, 2013)
lookup_acs1year_2012 <- make_acs_lookup(1, 2012)
lookup_acs1year_2011 <- make_acs_lookup(1, 2011)
lookup_acs1year_2010 <- make_acs_lookup(1, 2010)
lookup_acs1year_2009 <- make_acs_lookup(1, 2009)
lookup_acs1year_2008 <- make_acs_lookup(1, 2008)
lookup_acs1year_2007 <- make_acs_lookup(1, 2007)
lookup_acs1year_2006 <- make_acs_lookup(1, 2006)
lookup_acs1year_2005 <- make_acs_lookup(1, 2005)
# The explicit save() calls below are kept for reference only: saving now
# happens inside make_acs_lookup().
# # save to data/
# save(lookup_acs5year_2016, file = "data/lookup_acs5year_2016.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs5year_2015, file = "data/lookup_acs5year_2015.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs5year_2010, file = "data/lookup_acs5year_2010.RData",
# compress = "xz", compression_level = 9)
#
# save(lookup_acs1year_2017, file = "data/lookup_acs1year_2017.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2016, file = "data/lookup_acs1year_2016.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2015, file = "data/lookup_acs1year_2015.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2014, file = "data/lookup_acs1year_2014.RData",
# compress = "xz", compression_level = 9)
# save(lookup_acs1year_2010, file = "data/lookup_acs1year_2010.RData",
# compress = "xz", compression_level = 9)
|
# Contract tests: a defaultlist ("dlist") must be indistinguishable from a
# plain base-R list whenever the accessed elements actually exist. The
# configured default only matters for absent elements, which is exercised at
# the end with a NULL default.
test_that("list and defaultlist behave the same when elements exist", {
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Starting with an empty dlist: `$<-` must grow a dlist exactly like a list
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- empty_dlist(0)
  ll <- list()
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  ll$a <- 1
  ll$b <- 2
  ll$c <- 3
  dl$a <- 1
  dl$b <- 2
  dl$c <- 3
  expect_equivalent(dl, ll)
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Simple list: `[`, `[[`, `$`, zero/negative/range indexing all match
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- dlist(a = 1, b = 'hello', c = FALSE, .default = 0)
  ll <- list(a = 1, b = 'hello', c = FALSE)
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  for (i in seq_along(dl)) {
    expect_identical(dl[[i]], ll[[i]])
    expect_equivalent(dl[i] , ll[i])
  }
  expect_identical(dl$a, ll$a)
  expect_identical(dl$b, ll$b)
  expect_identical(dl$c, ll$c)
  expect_equivalent(dl[ 0], ll[ 0])
  expect_equivalent(dl[-1], ll[-1])
  expect_equivalent(dl[-2], ll[-2])
  expect_equivalent(dl[-3], ll[-3])
  expect_equivalent(dl[c(1:3)], ll[c(1:3)])
  expect_equivalent(dl[c(1:2)], ll[c(1:2)])
  expect_equivalent(dl[c(2:3)], ll[c(2:3)])
  expect_equivalent(dl[c(1,3)], ll[c(1,3)])
  expect_equivalent(dl[0:2], ll[0:2])
  # `[[` with a negative index errors identically for both containers
  expect_error(dl[[-1]], "attempt to select more than one element")
  expect_error(ll[[-1]], "attempt to select more than one element")
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Vector indexing of nested lists with `[[` (recursive extraction)
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- dlist(a = list(a = 1, b = 2, c = 3), d = 4, e = 5, .default = 0)
  ll <- list(a = list(a = 1, b = 2, c = 3), d = 4, e = 5)
  expect_equivalent(ll[[c(1, 2)]], dl[[c(1, 2)]])
  expect_equivalent(ll[[c('a', 'c')]], dl[[c('a', 'c')]])
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Starting with an empty dlist with a NULL default: missing elements must
  # come back as NULL, matching base-list semantics
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- empty_dlist(NULL)
  ll <- list()
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  ll$a <- 1
  ll$b <- 2
  ll$c <- 3
  dl$a <- 1
  dl$b <- 2
  dl$c <- 3
  expect_equivalent(dl, ll)
  expect_null(dl$d)
  expect_null(dl[[6]])
})
| /tests/testthat/test-list-like-behaviour.R | permissive | coolbutuseless/defaultlist | R | false | false | 2,550 | r |
# Contract tests: a defaultlist ("dlist") must be indistinguishable from a
# plain base-R list whenever the accessed elements actually exist. The
# configured default only matters for absent elements, which is exercised at
# the end with a NULL default.
test_that("list and defaultlist behave the same when elements exist", {
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Starting with an empty dlist: `$<-` must grow a dlist exactly like a list
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- empty_dlist(0)
  ll <- list()
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  ll$a <- 1
  ll$b <- 2
  ll$c <- 3
  dl$a <- 1
  dl$b <- 2
  dl$c <- 3
  expect_equivalent(dl, ll)
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Simple list: `[`, `[[`, `$`, zero/negative/range indexing all match
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- dlist(a = 1, b = 'hello', c = FALSE, .default = 0)
  ll <- list(a = 1, b = 'hello', c = FALSE)
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  for (i in seq_along(dl)) {
    expect_identical(dl[[i]], ll[[i]])
    expect_equivalent(dl[i] , ll[i])
  }
  expect_identical(dl$a, ll$a)
  expect_identical(dl$b, ll$b)
  expect_identical(dl$c, ll$c)
  expect_equivalent(dl[ 0], ll[ 0])
  expect_equivalent(dl[-1], ll[-1])
  expect_equivalent(dl[-2], ll[-2])
  expect_equivalent(dl[-3], ll[-3])
  expect_equivalent(dl[c(1:3)], ll[c(1:3)])
  expect_equivalent(dl[c(1:2)], ll[c(1:2)])
  expect_equivalent(dl[c(2:3)], ll[c(2:3)])
  expect_equivalent(dl[c(1,3)], ll[c(1,3)])
  expect_equivalent(dl[0:2], ll[0:2])
  # `[[` with a negative index errors identically for both containers
  expect_error(dl[[-1]], "attempt to select more than one element")
  expect_error(ll[[-1]], "attempt to select more than one element")
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Vector indexing of nested lists with `[[` (recursive extraction)
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- dlist(a = list(a = 1, b = 2, c = 3), d = 4, e = 5, .default = 0)
  ll <- list(a = list(a = 1, b = 2, c = 3), d = 4, e = 5)
  expect_equivalent(ll[[c(1, 2)]], dl[[c(1, 2)]])
  expect_equivalent(ll[[c('a', 'c')]], dl[[c('a', 'c')]])
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Starting with an empty dlist with a NULL default: missing elements must
  # come back as NULL, matching base-list semantics
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  dl <- empty_dlist(NULL)
  ll <- list()
  expect_true(is.list(dl))
  expect_true(is.list(ll))
  expect_equivalent(dl, ll)
  ll$a <- 1
  ll$b <- 2
  ll$c <- 3
  dl$a <- 1
  dl$b <- 2
  dl$c <- 3
  expect_equivalent(dl, ll)
  expect_null(dl$d)
  expect_null(dl[[6]])
})
|
# Create different vectors (use `<-` for assignment, per R style)
val <- 1:20           # ascending integers 1..20
print(val)
val <- 20:1           # descending 20..1
print(val)
val <- c(1:20, 19:1)  # up then back down again (length 39)
print(val)
tmp <- c(4, 6, 3)     # small numeric vector; printed on the next line
print(tmp) | /Day 2/11.R | no_license | eElec/DALab | R | false | false | 133 | r | # Create different vectors
val = 1:20           # ascending integers 1..20
print(val)
val = 20:1           # descending 20..1
print(val)
val = c(1:20, 19:1)  # up then back down again (length 39)
print(val)
tmp = c(4, 6, 3)     # small numeric vector; printed on the next line
print(tmp) |
# Shiny UI for the MPG calculator. NOTE: the closing parenthesis of shinyUI(
# sits on the line immediately after this expression.
shinyUI(fluidPage(
  titlePanel(
    h3("MPG Calculation", align = "left")
  ),
  sidebarLayout(
    # Input controls: transmission type, car weight, and quarter-mile time.
    sidebarPanel(
      h3("Instruction:"),
      helpText("Please select manual/automatic transmission, weight of car, and 1/4 mile time of car, and click 'Submit'."),
      radioButtons("am", label = h5("Transmission"), choices = list("manual" = 0, "automatic" = 1), selected = 1),
      sliderInput("weight", label = h5("Weight of Car (lb/1000)"), min = 1.5, max = 5.5, step = 0.1, value = 2.0),
      sliderInput("qsec", label = h5("Car Speed: 1/4 mile time in seconds"), min = 14.5, max = 23.0, step = 0.1, value = 10.0),
      submitButton("Submit")
    ),
    # Output: explanatory notes plus the echoed inputs and the predicted MPG.
    mainPanel(
      h4("Note:"),
      h5("The application is a demonstration of calculation on MPG. We used the dataset of 'mtcars'. The data was extracted from the 1974 Motor Trend US magazine, and comprises fuel consumption and 10 aspects of automobile design and performance for 32 automobiles (1973–74 models). I developed a regression model between the outcome mpg and variables."),
      h5("The regression model we developed has Adjusted R-squared of 0.9862. The regression model is:"),
      em("mpg=9.6178factor(am)0+12.5536factor(am)1-3.9165wt+1.2259qsec"),
      h5("'am' is transmission(manual=0,automatic=1),'wt' is weight of car,'qsec' is speed in 1/4 mile time"),
      h5("We use the value range as variable value range in the 'mtcars' dataset."),
      h5("You may input transmission, weight and speed on the left side as instructed, and you'll get the MPG result on the right side."),
      h4("TRY IT!"),
      h5("=========================================================================================="),
      strong("You have selected:"),
      textOutput("text1"),
      br(),
      strong("The Weight of Car is"),
      textOutput("text2"),
      br(),
      strong("The Car Speed of 1/4 mile time in seconds is"),
      textOutput("text3"),
      br(),
      strong("The MPG of the car is"),
      div(textOutput("result"), style = "color:red"))
  )
)
) | /shinyproject/ui.R | no_license | Maggiebj/DDP-project | R | false | false | 2,173 | r | shinyUI(fluidPage(
  # (continuation of shinyUI(fluidPage( opened on the preceding line)
  titlePanel(
    h3("MPG Calculation",align="left")
  ),
  sidebarLayout(
    # Input controls: transmission type, car weight, and quarter-mile time.
    sidebarPanel(
      h3("Instruction:"),
      helpText("Please select manual/automatic transmission,weight of car,and 1/4 mile time of car,and click 'Submit'."),
      radioButtons("am",label=h5("Transmission"),choices=list("manual"=0,"automatic"=1),selected=1),
      sliderInput("weight",label=h5("Weight of Car (lb/1000)"),min=1.5,max=5.5,step=0.1,value=2.0),
      sliderInput("qsec",label=h5("Car Speed:1/4 mile time in seconds"),min=14.5,max=23.0,step=0.1,value=10.0),
      submitButton("Submit")
    ),
    # Output: explanatory notes plus the echoed inputs and the predicted MPG.
    mainPanel(
      h4("Note:"),
      h5("The application is a demonstration of calculation on MPG. We used the dataset of 'mtcars'.The data was extracted from the 1974 Motor Trend US magazine, and comprises fuel consumption and 10 aspects of automobile design and performance for 32 automobiles (1973–74 models).I developed a regression model between the outcome mpg and variables."),
      h5("The regression model we developed has Adjusted R-squared of 0.9862.The regression model is:"),
      em("mpg=9.6178factor(am)0+12.5536factor(am)1-3.9165wt+1.2259qsec"),
      h5("'am' is transmission(manual=0,automatic=1),'wt' is weight of car,'qsec' is speed in 1/4mile time"),
      h5("we use the value range as variable value range in the 'mtcars'dataset."),
      h5("You may input transmission,weight and speed in the left side as instrustion, and you'll get MPG result in the right side."),
      h4("TRY IT!"),
      h5("=========================================================================================="),
      strong("You have selected:"),
      textOutput("text1"),
      br(),
      strong("The Weight of Car is"),
      textOutput("text2"),
      br(),
      strong("The Car Speed of 1/4 mile time in second is"),
      textOutput("text3"),
      br(),
      strong("The MPG of the car is"),
      div( textOutput("result"),style="color:red"))
  )
)
## x=barcode names for cerebellum, y=barcode names for cortex
## probe.id=list of DM probes, data.type=cell type corrected or non-cell type corrected data (the entire data frame)
## outputs the table called probes which gives you a table of probe IDs, p.value, and adjusted p values for each probe
wilcox.test.probes <- function(x, y, probe.ids, data.type){
  # Per-probe Wilcoxon rank-sum tests between two sample groups.
  # x         : barcode (column) names for the cerebellum samples
  # y         : barcode (column) names for the cortex samples
  # probe.ids : probe identifiers; must match rownames of `data.type`
  # data.type : data frame of values (probes in rows, barcodes in columns)
  # Returns a data frame with columns probe.ids, p.value, and BH-adjusted adj.p.
  probes <- data.frame(probe.ids)
  cerebellum <- data.type[as.character(probe.ids),x]
  cortex <- data.type[as.character(probe.ids),y]
  # one rank-sum test per probe, matched back to its row by probe id
  for (i in probe.ids){
    a <- as.numeric(as.vector(cerebellum[as.character(i),]))
    b <- as.numeric(as.vector(cortex[as.character(i),]))
    probes[which(probes$probe.ids==as.character(i)),"p.value"] <- wilcox.test(a,b)$p.value
  }
  # Benjamini-Hochberg correction across all tested probes
  probes$adj.p <- p.adjust(probes$p.value,method="BH")
  probes <- data.frame(probes)
  probes
} | /data/processed_data/WilcoxTestProbes.R | no_license | suminwei2772/team_Methylhomies | R | false | false | 829 | r | ## x=barcode names for cerebellum, y=barcode names for cortex
## probe.id=list of DM probes, data.type=cell type corrected or non-cell type corrected data (the entire data frame)
## outputs the table called probes which gives you a table of probe IDs, p.value, and adjusted p values for each probe
wilcox.test.probes <- function(x, y, probe.ids, data.type){
  # Per-probe Wilcoxon rank-sum tests between two sample groups.
  #
  # x         : barcode (column) names for the cerebellum samples
  # y         : barcode (column) names for the cortex samples
  # probe.ids : probe identifiers; must match rownames of `data.type`
  # data.type : data frame of values (probes in rows, barcodes in columns)
  #
  # Returns a data frame with columns probe.ids, p.value, and adj.p
  # (Benjamini-Hochberg adjusted), one row per probe in input order.
  ids <- as.character(probe.ids)
  # drop = FALSE keeps a data frame even when a group has a single barcode,
  # which the original row-wise indexing silently assumed.
  cerebellum <- data.type[ids, x, drop = FALSE]
  cortex <- data.type[ids, y, drop = FALSE]
  # One test per probe, in input order; vapply guarantees one numeric per id
  # and avoids the original O(n^2) which() lookup on every iteration.
  pvals <- vapply(ids, function(id) {
    a <- as.numeric(unlist(cerebellum[id, ], use.names = FALSE))
    b <- as.numeric(unlist(cortex[id, ], use.names = FALSE))
    wilcox.test(a, b)$p.value
  }, numeric(1))
  probes <- data.frame(probe.ids)
  probes$p.value <- unname(pvals)
  # Benjamini-Hochberg correction across all tested probes
  probes$adj.p <- p.adjust(probes$p.value, method = "BH")
  probes
}
#' @title Calculate trail
#' @description Calculate the trail. For further information have a look at
#' \url{http://en.wikipedia.org/wiki/Bicycle_and_motorcycle_geometry}.
#' @param dia Diameter of the wheel including the tire [mm].
#' @param rake The rake, also known as fork offest [mm].
#' @param ha Head angle [degrees].
#' @details \code{calc_trail} uses trigonometric functions to calculate the
#' trail. Please refer to \code{vignette("velo_trail", package = "velo")} for
#' a detailed description and mathematical proof.
#' @author Dirk Haas, Jannes Muenchow
#' @return The function returns a numeric representing the trail [mm].
#' @export
#' @examples
#' calc_trail(dia = 700, rake = 58, ha = 73)
calc_trail <- function(dia = NULL, rake = NULL, ha = NULL) {
  # Trail of a bicycle front end (mm), from wheel diameter `dia` (mm), fork
  # rake/offset `rake` (mm), and head angle `ha` (degrees). All three
  # arguments are required; any left at NULL are reported in the error.
  required <- c("dia", "rake", "ha")
  not_given <- vapply(required, function(nm) is.null(get(nm)), logical(1))
  if (any(not_given)) {
    stop("Please specify: ",
         paste(required[not_given], collapse = ", "))
  }
  deg2rad <- pi / 180
  # alpha: angle between the steering axis and vertical, in radians
  alpha <- (90 - ha) * deg2rad
  # hypotenuse of the upper triangle: rake is its opposite leg
  steer_hypo <- rake / sin(alpha)
  # opposite leg of the lower triangle: wheel radius minus that hypotenuse
  ground_leg <- dia / 2 - steer_hypo
  # trail follows from the tangent of the head angle
  ground_leg / tan(ha * deg2rad)
}
| /R/calc_trail.R | no_license | jannes-m/velo | R | false | false | 1,346 | r | #' @title Calculate trail
#' @description Calculate the trail. For further information have a look at
#' \url{http://en.wikipedia.org/wiki/Bicycle_and_motorcycle_geometry}.
#' @param dia Diameter of the wheel including the tire [mm].
#' @param rake The rake, also known as fork offest [mm].
#' @param ha Head angle [degrees].
#' @details \code{calc_trail} uses trigonometric functions to calculate the
#' trail. Please refer to \code{vignette("velo_trail", package = "velo")} for
#' a detailed description and mathematical proof.
#' @author Dirk Haas, Jannes Muenchow
#' @return The function returns a numeric representing the trail [mm].
#' @export
#' @examples
#' calc_trail(dia = 700, rake = 58, ha = 73)
calc_trail <- function(dia = NULL, rake = NULL, ha = NULL) {
  # All three arguments are required; report any that were not supplied.
  args <- c("dia", "rake", "ha")
  ind <- mapply(function(x) is.null(get(x)), as.list(args))
  if (any(ind)) {
    stop("Please specify: ",
         paste(args[ind], collapse = ", "))
  }
  # calculate alpha, the angle between the steering axis and the vertical
  alpha <- 90 - ha
  # convert to radians
  alpha <- alpha * pi / 180
  # calculate hypotenuse of the upper triangle (hypo = opposite leg (rake) /
  # sin(alpha))
  hypo <- rake / sin(alpha)
  # opposite leg (of the lower triangle) = radius - hypotenuse
  opp_leg <- dia / 2 - hypo
  # calculate trail using tangens; result is in mm
  opp_leg / tan(ha * pi / 180)
}
|
#' Plot Exonic Mapping Rate
#'
#' Ideally, at least 60 percent of total reads should map to exons.
#'
#' @name plotExonicMappingRate
#' @family Quality Control Functions
#' @author Michael Steinbaugh, Rory Kirchner, Victor Barrera
#'
#' @inheritParams general
#'
#' @return `ggplot`.
#'
#' @examples
#' plotExonicMappingRate(bcb_small)
NULL
#' @rdname plotExonicMappingRate
#' @export
setMethod(
  "plotExonicMappingRate",
  signature("bcbioRNASeq"),
  function(
    object,
    interestingGroups,
    limit = 0.6,  # QC cutoff, expressed as a ratio in [0, 1]
    fill = getOption("bcbio.discrete.fill", NULL),
    flip = getOption("bcbio.flip", TRUE),
    title = "exonic mapping rate"
  ) {
    # Per-sample bar plot of the exonic mapping rate, with an optional cutoff
    # line at `limit` and bars colored by the interesting groups.
    validObject(object)
    interestingGroups <- matchInterestingGroups(
      object = object,
      interestingGroups = interestingGroups
    )
    # Validate inputs up front.
    assert_is_a_number(limit)
    assert_all_are_non_negative(limit)
    assertIsFillScaleDiscreteOrNULL(fill)
    assert_is_a_bool(flip)
    assertIsAStringOrNULL(title)
    # exonicRate is stored as a ratio; plot it as a percentage.
    p <- metrics(object) %>%
      ggplot(
        mapping = aes(
          x = !!sym("sampleName"),
          y = !!sym("exonicRate") * 100L,
          fill = !!sym("interestingGroups")
        )
      ) +
      geom_bar(
        color = "black",
        stat = "identity"
      ) +
      labs(
        title = title,
        x = NULL,
        y = "exonic mapping rate (%)",
        fill = paste(interestingGroups, collapse = ":\n")
      )
    if (is_positive(limit)) {
      # Convert to percentage
      if (limit > 1L) {
        # nocov start
        warning("`limit`: Use ratio (0-1) instead of percentage")
        # nocov end
      } else {
        limit <- limit * 100L
      }
      if (limit < 100L) {
        p <- p + bcbio_geom_abline(yintercept = limit)
      }
    }
    if (is(fill, "ScaleDiscrete")) {
      p <- p + fill
    }
    if (isTRUE(flip)) {
      # Horizontal bars read better with many samples.
      p <- p + coord_flip()
    }
    if (identical(interestingGroups, "sampleName")) {
      # A legend keyed by sample name just duplicates the axis; hide it.
      p <- p + guides(fill = FALSE)
    }
    p
  }
)
| /R/plotExonicMappingRate-methods.R | permissive | enginbozaba/bcbioRNASeq | R | false | false | 2,327 | r | #' Plot Exonic Mapping Rate
#'
#' Ideally, at least 60 percent of total reads should map to exons.
#'
#' @name plotExonicMappingRate
#' @family Quality Control Functions
#' @author Michael Steinbaugh, Rory Kirchner, Victor Barrera
#'
#' @inheritParams general
#'
#' @return `ggplot`.
#'
#' @examples
#' plotExonicMappingRate(bcb_small)
NULL
#' @rdname plotExonicMappingRate
#' @export
setMethod(
  "plotExonicMappingRate",
  signature("bcbioRNASeq"),
  function(
    object,
    interestingGroups,
    limit = 0.6,  # QC cutoff, expressed as a ratio in [0, 1]
    fill = getOption("bcbio.discrete.fill", NULL),
    flip = getOption("bcbio.flip", TRUE),
    title = "exonic mapping rate"
  ) {
    # Per-sample bar plot of the exonic mapping rate, with an optional cutoff
    # line at `limit` and bars colored by the interesting groups.
    validObject(object)
    interestingGroups <- matchInterestingGroups(
      object = object,
      interestingGroups = interestingGroups
    )
    # Validate inputs up front.
    assert_is_a_number(limit)
    assert_all_are_non_negative(limit)
    assertIsFillScaleDiscreteOrNULL(fill)
    assert_is_a_bool(flip)
    assertIsAStringOrNULL(title)
    # exonicRate is stored as a ratio; plot it as a percentage.
    p <- metrics(object) %>%
      ggplot(
        mapping = aes(
          x = !!sym("sampleName"),
          y = !!sym("exonicRate") * 100L,
          fill = !!sym("interestingGroups")
        )
      ) +
      geom_bar(
        color = "black",
        stat = "identity"
      ) +
      labs(
        title = title,
        x = NULL,
        y = "exonic mapping rate (%)",
        fill = paste(interestingGroups, collapse = ":\n")
      )
    if (is_positive(limit)) {
      # Convert to percentage
      if (limit > 1L) {
        # nocov start
        warning("`limit`: Use ratio (0-1) instead of percentage")
        # nocov end
      } else {
        limit <- limit * 100L
      }
      if (limit < 100L) {
        p <- p + bcbio_geom_abline(yintercept = limit)
      }
    }
    if (is(fill, "ScaleDiscrete")) {
      p <- p + fill
    }
    if (isTRUE(flip)) {
      # Horizontal bars read better with many samples.
      p <- p + coord_flip()
    }
    if (identical(interestingGroups, "sampleName")) {
      # A legend keyed by sample name just duplicates the axis; hide it.
      p <- p + guides(fill = FALSE)
    }
    p
  }
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events.R
\name{read_keds2}
\alias{read_keds2}
\title{Read in KEDS output files}
\usage{
read_keds2(d, one.a.day = TRUE)
}
\arguments{
\item{d}{one or more names of KEDS output}
\item{one.a.day}{whether we should apply the one-a-day event filter?}
}
\value{
an event data set
}
\description{
Read in KEDS output files
}
| /man/read_keds2.Rd | no_license | conjugateprior/events | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events.R
\name{read_keds2}
\alias{read_keds2}
\title{Read in KEDS output files}
\usage{
read_keds2(d, one.a.day = TRUE)
}
\arguments{
\item{d}{one or more names of KEDS output}
\item{one.a.day}{whether we should apply the one-a-day event filter?}
}
\value{
an event data set
}
\description{
Read in KEDS output files
}
|
# This function processes simulated datasets created with the function make.pps.als. It takes the posterior phylograms and simulated alignments. The function creates the posterior predictive simulated distribution of phylograms. this function also requires the empirical dataset and the assumed topology.
make.pps.tr <- function(sims, empdat, topo){
  # Optimise branch lengths on the fixed topology for the empirical alignment
  # and for each posterior predictive simulated alignment.
  # sims   : list of simulations; element [[3]] of each holds the alignment
  # empdat : empirical alignment
  # topo   : assumed topology; branch lengths are reset to 1 before fitting
  # Returns list(empirical branch lengths, matrix of simulated branch lengths,
  # empirical multinomial likelihood, vector of simulated likelihoods).
  topo$edge.length <- rep(1, length(topo$edge.length))
  emphy <- optim.pml(pml(topo, empdat))
  empmlik <- multlik(empdat)
  empbl <- emphy$tree$edge.length
  # seed the accumulator with a dummy first row; it is stripped below
  simbl <- emphy$tree$edge.length
  simultlik <- vector()
  for(i in 1:length(sims)){
    ppsphy <- optim.pml(pml(topo, sims[[i]][[3]]))
    simbl <- rbind(simbl, ppsphy$tree$edge.length)
    simultlik[i] <- multlik(sims[[i]][[3]])
    print(paste("Simulation", i, "processed"))
  }
  # drop the dummy row. NOTE(review): with a single simulation this slice
  # collapses to a plain vector -- confirm callers always pass >= 2 sims.
  simbl <- simbl[2:nrow(simbl),]
  return(list(empbl, simbl, empmlik, simultlik))
} | /clock_modad/make.pps.trs.R | no_license | duchene/modadclocks | R | false | false | 892 | r | # This function processes simulated datasets created with the function make.pps.als. It takes the posterior phylograms and simulated alignments. The function creates the posterior predictive simulated distribution of phylograms. this function also requires the empirical dataset and the assumed topology.
# Build the posterior-predictive distribution of phylograms: optimise branch
# lengths on the fixed topology for the empirical alignment and for each
# simulated alignment produced by make.pps.als.
#   sims   - list of simulations; element [[3]] of each is used as a simulated
#            alignment (structure assumed from the original code -- confirm).
#   empdat - empirical alignment.
#   topo   - assumed topology; branch lengths are reset before optimisation.
# Returns: list(empirical branch lengths, matrix of simulated branch lengths
# with one row per simulation, empirical multinomial likelihood, vector of
# simulated multinomial likelihoods).
make.pps.tr <- function(sims, empdat, topo){
	# Start every ML optimisation from unit branch lengths.
	topo$edge.length <- rep(1, length(topo$edge.length))
	emphy <- optim.pml(pml(topo, empdat))
	empmlik <- multlik(empdat)
	empbl <- emphy$tree$edge.length
	n_sims <- length(sims)
	# Preallocate results instead of growing simbl with rbind() from a
	# placeholder row as the original did. This fixes two defects:
	#   * length(sims) == 0 no longer errors (the original looped over 1:0);
	#   * a single simulation now yields a 1-row matrix instead of silently
	#     collapsing to a plain vector when the placeholder row was stripped.
	simbl <- matrix(NA_real_, nrow = n_sims, ncol = length(empbl))
	simultlik <- numeric(n_sims)
	for(i in seq_len(n_sims)){
		ppsphy <- optim.pml(pml(topo, sims[[i]][[3]]))
		simbl[i, ] <- ppsphy$tree$edge.length
		simultlik[i] <- multlik(sims[[i]][[3]])
		print(paste("Simulation", i, "processed"))
	}
	return(list(empbl, simbl, empmlik, simultlik))
}
## -----------------------------------------------------------------------------------------------------------------
# Megatrends: Characterizing residual transmission
# assess_megatrends.r
#
# Amelia Bertozzi-Villa, Institute for Disease Modeling, University of Oxford
# December 2018
#
# As per the request of the WHO-SAGme, look at the megatrends + existing intervention forecasts and
# take a first pass at determining:
# 1. Whether pockets of residual transmission are determined by high initial transmission or something else;
# 2. If "something else", what mechanistic modeling scenarios result in higher residual transmission.
#
## -----------------------------------------------------------------------------------------------------------------------
library(raster)
library(data.table)
library(stats)
library(ggplot2)
library(rasterVis)
library(gridExtra)
# Reset the session and set the default plot theme.
# NOTE(review): rm(list = ls()) wipes the caller's global environment if this
# script is source()d; retained to match the original workflow.
rm(list=ls())
theme_set(theme_minimal())

# Locate the Dropbox root in a cross-platform way.
# BUGFIX: the original called ifelse(Sys.getenv("USERPROFILE")=="",
# Sys.getenv("HOME")) with the 'no' argument missing, which errors whenever
# USERPROFILE is set (i.e. on any Windows machine). A scalar if/else supplies
# the missing branch and is the idiomatic form for a length-one condition.
root_dir <- if (Sys.getenv("USERPROFILE") == "") Sys.getenv("HOME") else Sys.getenv("USERPROFILE")
base_dir <- file.path(root_dir, "Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact")
main_dir <- file.path(base_dir, "/megatrends")
cluster_fname <- file.path(base_dir, "lookup_tables/interactions/africa_clusters_v4.tif")

# Per-site "anthro" and "endo" percentages for the six archetype sites;
# human_indoor is their product rescaled to a percentage.
anthro_endo_map <- data.table(cluster=1:6,
                              sitename=c("aba", "kananga", "kasama", "djibo", "gode", "moine"),
                              anthro=c(74.45, 65.02, 79.04, 76.6, 75, 75.78),
                              endo=c(80, 85, 80.38, 55.6, 50, 52.73))
anthro_endo_map[, human_indoor:= (anthro*endo)/100]
write.csv(anthro_endo_map, file=file.path(main_dir, "anthro_endo_map.csv"))

# One color per transmission archetype (clusters 1-6).
colors <- c("#00a08a", "#d71b5a", "#f2a200", "#f98400", "#902e57", "#5392c2")
# load in data, clip to africa
# Read the cluster (archetype) raster and the two 2050 "megatrends" PfPR
# projections: interventions held at year-2000 levels ("base2000") and at
# 2016 levels ("base2016").
cluster_layer <- raster(cluster_fname)
megatrends_noints <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_base2000_2050.tif"))
megatrends_ints <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_base2016_2050.tif"))
# Select the intervention package to analyse; each branch loads the ABV
# lookup-table surface, Pete's equivalent surface, and a plot label.
int_type <- "itn_act"
if (int_type=="itn_act"){
  interventions <- raster(file.path(main_dir, "abv_pfpr_africa_ITN.0.8..ACT.0.8..tif"))
  interventions_pete <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_2050_ITN80ACT80-14.tif"))
  int_label <- "ITN 80%, ACT 80%"
}else if (int_type=="itn_irs_act"){
  interventions <- raster(file.path(main_dir, "abv_pfpr_africa_ITN.0.8..IRS.0.8..ACT.0.8..tif"))
  interventions_pete <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_2050_ITN80IRS80ACT80-14.tif"))
  int_label <- "ITN 80%, IRS 80%, ACT 80%"
}else{
  stop(paste("unrecognized intervention type", int_type))
}
# ensure consistent extents in all rasters, and
# set near-zero areas to zero.
cutoff_pr <- 0.0001
cluster_layer <- crop(cluster_layer, interventions)
# Keep an untouched copy before thresholding below.
megatrends_orig <- copy(megatrends_noints)
rastlist <- list("megatrends_noints"=megatrends_noints, "megatrends_ints"=megatrends_ints,
                 "interventions"=interventions, "interventions_pete"=interventions_pete)
print("cropping and removing zeros")
# Threshold and crop each raster, then overwrite the global variable of the
# same name via assign(); all downstream code relies on these globals.
for (rastname in names(rastlist)){
  rast <- rastlist[[rastname]]
  rast[rast<cutoff_pr] <- 0
  newrast <- crop(rast, cluster_layer)
  names(newrast) <- rastname
  assign(rastname, newrast)
}
# mask clusters to megatrends
cluster_layer <- raster::mask(cluster_layer, megatrends_noints)
names(cluster_layer) <- "cluster"
# my "interventions" dataset is unbounded by the "megatrends with interventions" dataset.
# I need to bound it to be comparable to Pete's
# (and to explore the difference between bounded and unbounded residual transmission)
# TODO: find reason for differences between my and Pete's bounded rasters.
# Pixel-wise minimum of the two surfaces.
bounded_interventions <-min(stack(interventions,megatrends_ints))
names(bounded_interventions) <- "bounded_interventions"
megatrends_diff <- interventions - bounded_interventions
# NOTE(review): brewer.pal() belongs to RColorBrewer, which is never attached
# in this script -- confirm it is on the search path at run time.
pal <- c("#e0e0e0", rev(brewer.pal(11, "Spectral")))
breaks <- c(0, seq(0.01, 1, length.out=11))
levelplot(stack(bounded_interventions, interventions_pete), par.settings=rasterTheme(pal), at=breaks, xlab=NULL, ylab=NULL, margin=F, scales=list(draw=F))
# compare my bounded intervention to Pete's
abv_pg_diff <- bounded_interventions - interventions_pete
plot(abv_pg_diff>0.01 | abv_pg_diff<(-0.01), main=">1% difference between ABV and PG")
# spatially disaggregate pixels that track lookup table vs megatrends
# for_residplot: full cluster map with one pixel zeroed (presumably so the
# zero level, and hence the grey colour, appears in the legend -- confirm);
# resid_clust: cluster id only where transmission persists under the bounded
# intervention surface.
for_residplot <- copy(cluster_layer)
for_residplot[1] <- 0
resid_clust <- copy(cluster_layer)
resid_clust[bounded_interventions<cutoff_pr] <- 0
stacked <- stack(ratify(for_residplot), ratify(resid_clust))
names(stacked) <- c("Full Cluster", "Residual Transmission")
pdf(file.path(main_dir, "cluster_plots.pdf"), width=12, height=6)
cluster_plot <- levelplot(stacked, att="ID", col.regions=c("#A9A9A9", colors),
                          xlab=NULL, ylab=NULL, scales=list(draw=F),
                          main = int_label, colorkey=F, margin=F)
print(cluster_plot)
graphics.off()
# explore areas of residual transmission--------------------------
# Flatten a single-layer raster into a long data.table with one row per
# finite cell: id (cell index into the raster's matrix representation),
# value, and type (the raster's layer name), ready for dcast() to wide form.
raster_to_dt <- function(rast){
  vals <- as.matrix(rast)
  # Compute the finite-cell index once; the original evaluated the
  # !is.na & !is.infinite mask twice over the full matrix.
  keep <- which(!is.na(vals) & !is.infinite(vals))
  out <- data.table(id = keep, value = vals[keep])
  out[, type:= names(rast)]
  return(out)
}
# Convert every raster to long form, then cast wide: one row per pixel id
# with one column per raster layer plus the cluster id.
reduction_dt <- lapply(list(interventions, bounded_interventions, megatrends_ints, megatrends_noints, cluster_layer), raster_to_dt)
reduction_dt <- rbindlist(reduction_dt)
reduction_dt <- dcast.data.table(reduction_dt, id ~ type)
reduction_dt <- reduction_dt[complete.cases(reduction_dt)]
# Keep only pixels with transmission in the no-intervention projection.
reduction_dt <- reduction_dt[megatrends_noints>0]
reduction_dt <- merge(reduction_dt, anthro_endo_map, by="cluster", all=T)
reduction_dt[, cluster:=as.factor(cluster)]
# eliminate is 1 only when bounded_interventions is exactly 0 (floor of 1-x
# for x in [0,1]): a pixel-level elimination indicator.
reduction_dt[, eliminate:=floor(1-bounded_interventions)]
# Bin initial (no-intervention) prevalence into 5%-wide classes.
reduction_dt[, init_prev_class:= cut(megatrends_noints, breaks=seq(0,1,0.05),
                                     labels=c("0-5", "5-10", "10-15", "15-20", "20-25", "25-30", "30-35",
                                              "35-40", "40-45", "45-50", "50-55", "55-60", "60-65", "65-70",
                                              "70-75", "75-80", "80-85", "85-90", "90-95", "95-100"))]
# Mean of every column within site x cluster x initial-prevalence class, plus
# a pixel count per cluster x class and a quintile-binned version of it.
summary_dt <- reduction_dt[,lapply(.SD, mean), by=list(sitename, cluster, init_prev_class)]
summary_dt[, human_indoor:=round(human_indoor, 2)]
summary_dt <- merge(summary_dt, reduction_dt[, list(count=.N), by=list(cluster, init_prev_class)],
                    by=c("cluster", "init_prev_class"), all=T)
# NOTE(review): these count_class labels are hard-coded quintile boundaries --
# they will be wrong if the underlying data change; confirm.
summary_dt[, count_class:=cut(count, breaks=quantile(count, probs=seq(0,1,0.2)),
                              labels=c("4-161", "162-1,743", "1,744-7,839", "7,840-14,003", "14,004-59,512"), include.lowest=T)]
# Factor ordered by decreasing indoor-biting value so facets sort high-to-low.
indoor_vals <- sort(unique(summary_dt$human_indoor), decreasing = T)
summary_dt[, human_indoor_factor:= factor(human_indoor, levels=indoor_vals, labels=as.character(indoor_vals))]
# Heatmap of elimination probability by initial prevalence x indoor biting.
elim_plot <- ggplot(summary_dt, aes(x=init_prev_class, y=human_indoor)) +
  geom_point(aes(color=eliminate, size=count_class), shape=15) +
  scale_size_discrete(name="Pixel Count") +
  scale_color_gradientn(colors=brewer.pal(7,"Spectral"), name="Proportion of\nPixels Eliminating") +
  theme(axis.text.x = element_text(angle=45, hjust=1)) +
  labs(x="Initial PfPR (%)",
       y="Indoor Biting (%)",
       title=paste("Probability of Elimination Under", int_label))
# Companion heatmap of mean final prevalence (residual transmission).
resid_plot <- ggplot(summary_dt, aes(x=init_prev_class, y=human_indoor)) +
  geom_point(aes(color=bounded_interventions, size=count_class), shape=15) +
  scale_size_discrete(name="Pixel Count") +
  scale_color_gradientn(colors=rev(brewer.pal(7,"Spectral")), name="Mean Final\nPrevalence") +
  theme(axis.text.x = element_text(angle=45, hjust=1)) +
  labs(x="Initial PfPR (%)",
       y="Indoor Biting (%)",
       title=paste("Residual Transmission Under", int_label))
# plot climate time series next to indoor biting maps
# Monthly covariate summaries per cluster, joined to site names and the
# indoor-biting factor so the panels line up with the heatmaps above.
climate <- fread(file.path(base_dir, "seasonal_classification/africa/kmeans/summary_tsi_rainfall_vector_abundance_6.csv"))
climate <- climate[variable_name=="month"]
climate[, cluster:=as.factor(cluster)]
climate_to_sitename = data.table(sitename=c("aba", "kananga", "kasama", "djibo", "gode", "moine"),
                                 cluster=1:6)
climate <- merge(climate, climate_to_sitename, by="cluster", all=T)
climate <- merge(climate, unique(summary_dt[, list(sitename,human_indoor_factor)]), by="sitename", all=T)
palette <- c("#F98400", "#00A08A", "#5392C2", "#902E57", "#F2AD00", "#D71B5A", "#98B548", "#8971B3")
these_colors <- palette[1:6]
# One faceted time-series plot per covariate in climate$cov.
lines <- lapply(unique(climate$cov), function(cov_value){
  data <- climate[cov==cov_value]
  # Axis heuristic: covariates whose 95th percentile exceeds 1 (rainfall-like)
  # get a 0-600 axis; fraction-like covariates get a 0-0.75 axis, hard-limited
  # only when all values fit below 0.75.
  if(max(data$perc_95)>1){
    ybreaks <- c(0, 600)
    ylabs <- c("0", "600")
    yminorbreaks <- c(100, 200, 300, 400, 500)
    ylimit <- NULL
  }else if(max(data$perc_95)<0.75){
    ybreaks <- c(0, 0.75)
    ylabs <- c("0", "0.75")
    yminorbreaks <- c(0.25, 0.5)
    ylimit <- c(0, 0.75)
  }else{
    ybreaks <- c(0, 0.75)
    ylabs <- c("0", "0.75")
    yminorbreaks <- c(0.25, 0.5)
    ylimit <- NULL
  }
  # Median line, dashed 5th/95th percentile envelope, shaded IQR ribbon.
  ggplot(data, aes(x=as.integer(variable_val), y=median, color=cluster, fill=cluster)) +
    facet_grid(human_indoor_factor~.) +
    geom_line(size=1) +
    geom_line(aes(y=perc_05), size=0.75, linetype=2) +
    geom_line(aes(y=perc_95), size=0.75, linetype=2) +
    geom_ribbon(aes(ymin=perc_25, ymax=perc_75), alpha=0.5, color=NA) +
    scale_color_manual(values = these_colors) +
    scale_fill_manual(values = these_colors) +
    scale_x_continuous(breaks=seq(2,12,2), labels=c("F","A","J","A","O","D"), minor_breaks=seq(1,12,2)) +
    scale_y_continuous(breaks=ybreaks, labels=ylabs, minor_breaks=yminorbreaks, limits=ylimit) +
    theme(legend.position="none",
          plot.title = element_text(size=16),
          strip.background = element_blank(),
          # strip.text.y = element_blank()
    ) +
    # NOTE(review): capitalize() comes from Hmisc, which is never attached in
    # this script -- confirm it is available at run time.
    labs(title=ifelse(nchar(cov_value)==3, toupper(cov_value), capitalize(cov_value)),
         x="",
         y="")
})
# Combined figure: the two heatmaps alongside the per-covariate panels.
pdf(file.path(main_dir, paste0("resid_by_endo_with_climate_", int_type, ".pdf")), height=7, width=20)
grid.arrange(grobs=append(list(elim_plot, resid_plot), lines), layout_matrix=rbind(c(1,1,1,2,2,2,3,4)))
graphics.off()
# Scatter of base-2000 vs base-2016 megatrends prevalence, per archetype.
# NOTE(review): these top-level ggplot expressions rely on R's auto-printing,
# which happens when the script is run directly but not when it is source()d.
png(file.path(main_dir, "megatrend_compare.png"), width=900, height=600)
ggplot(reduction_dt, aes(x=megatrends_noints, y=megatrends_ints)) +
  geom_point(alpha=0.25) +
  facet_wrap(~cluster) +
  geom_abline() +
  theme_minimal() +
  theme(legend.position="none") +
  xlim(0,1) +
  labs(title="Megatrend (Base 2000) vs Megatrend (Base 2016), \n by Transmission Archetype",
       x="Megatrends, Base 2000",
       y="Megatrends, Base 2016")
graphics.off()
# Same x-axis against the bounded intervention surface, pixels with residual
# transmission only.
png(file.path(main_dir, "megatrend_int_impact.png"), width=900, height=600)
ggplot(reduction_dt[bounded_interventions>cutoff_pr], aes(x=megatrends_noints, y=bounded_interventions)) +
  # geom_point(aes(y=megatrends_ints), alpha=0.1) +
  geom_point(aes(color=cluster), alpha=0.5) +
  scale_color_manual(values=colors) +
  facet_wrap(~cluster) +
  geom_abline() +
  theme_minimal() +
  theme(legend.position="none") +
  xlim(0,1) +
  labs(title="Megatrend (Base 2000) vs Megatrend Plus Interventions, \n by Transmission Archetype",
       x="Megatrend, Base 2000",
       y=paste("Megatrend Base 2000 +", int_label))
graphics.off()
| /megatrends/assess_residual_transmission.r | no_license | InstituteforDiseaseModeling/archetypes-intervention-impact | R | false | false | 11,537 | r | ## -----------------------------------------------------------------------------------------------------------------
# Megatrends: Characterizing residual transmission
# assess_megatrends.r
#
# Amelia Bertozzi-Villa, Institute for Disease Modeling, University of Oxford
# December 2018
#
# As per the request of the WHO-SAGme, look at the megatrends + existing intervention forecasts and
# take a first pass at determining:
# 1. Whether pockets of residual transmission are determined by high initial transmission or something else;
# 2. If "something else", what mechanistic modeling scenarios result in higher residual transmission.
#
## -----------------------------------------------------------------------------------------------------------------------
library(raster)
library(data.table)
library(stats)
library(ggplot2)
library(rasterVis)
library(gridExtra)
# Reset the session and set the default plot theme.
# NOTE(review): rm(list = ls()) wipes the caller's global environment if this
# script is source()d; retained to match the original workflow.
rm(list=ls())
theme_set(theme_minimal())

# Locate the Dropbox root in a cross-platform way.
# BUGFIX: the original called ifelse(Sys.getenv("USERPROFILE")=="",
# Sys.getenv("HOME")) with the 'no' argument missing, which errors whenever
# USERPROFILE is set (i.e. on any Windows machine). A scalar if/else supplies
# the missing branch and is the idiomatic form for a length-one condition.
root_dir <- if (Sys.getenv("USERPROFILE") == "") Sys.getenv("HOME") else Sys.getenv("USERPROFILE")
base_dir <- file.path(root_dir, "Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact")
main_dir <- file.path(base_dir, "/megatrends")
cluster_fname <- file.path(base_dir, "lookup_tables/interactions/africa_clusters_v4.tif")

# Per-site "anthro" and "endo" percentages for the six archetype sites;
# human_indoor is their product rescaled to a percentage.
anthro_endo_map <- data.table(cluster=1:6,
                              sitename=c("aba", "kananga", "kasama", "djibo", "gode", "moine"),
                              anthro=c(74.45, 65.02, 79.04, 76.6, 75, 75.78),
                              endo=c(80, 85, 80.38, 55.6, 50, 52.73))
anthro_endo_map[, human_indoor:= (anthro*endo)/100]
write.csv(anthro_endo_map, file=file.path(main_dir, "anthro_endo_map.csv"))

# One color per transmission archetype (clusters 1-6).
colors <- c("#00a08a", "#d71b5a", "#f2a200", "#f98400", "#902e57", "#5392c2")
# load in data, clip to africa
# Read the cluster (archetype) raster and the two 2050 "megatrends" PfPR
# projections: interventions held at year-2000 levels ("base2000") and at
# 2016 levels ("base2016").
cluster_layer <- raster(cluster_fname)
megatrends_noints <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_base2000_2050.tif"))
megatrends_ints <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_base2016_2050.tif"))
# Select the intervention package to analyse; each branch loads the ABV
# lookup-table surface, Pete's equivalent surface, and a plot label.
int_type <- "itn_act"
if (int_type=="itn_act"){
  interventions <- raster(file.path(main_dir, "abv_pfpr_africa_ITN.0.8..ACT.0.8..tif"))
  interventions_pete <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_2050_ITN80ACT80-14.tif"))
  int_label <- "ITN 80%, ACT 80%"
}else if (int_type=="itn_irs_act"){
  interventions <- raster(file.path(main_dir, "abv_pfpr_africa_ITN.0.8..IRS.0.8..ACT.0.8..tif"))
  interventions_pete <- raster(file.path(main_dir, "pete_analysis", "actual_ssp2_2050_ITN80IRS80ACT80-14.tif"))
  int_label <- "ITN 80%, IRS 80%, ACT 80%"
}else{
  stop(paste("unrecognized intervention type", int_type))
}
# ensure consistent extents in all rasters, and
# set near-zero areas to zero.
cutoff_pr <- 0.0001
cluster_layer <- crop(cluster_layer, interventions)
# Keep an untouched copy before thresholding below.
megatrends_orig <- copy(megatrends_noints)
rastlist <- list("megatrends_noints"=megatrends_noints, "megatrends_ints"=megatrends_ints,
                 "interventions"=interventions, "interventions_pete"=interventions_pete)
print("cropping and removing zeros")
# Threshold and crop each raster, then overwrite the global variable of the
# same name via assign(); all downstream code relies on these globals.
for (rastname in names(rastlist)){
  rast <- rastlist[[rastname]]
  rast[rast<cutoff_pr] <- 0
  newrast <- crop(rast, cluster_layer)
  names(newrast) <- rastname
  assign(rastname, newrast)
}
# mask clusters to megatrends
cluster_layer <- raster::mask(cluster_layer, megatrends_noints)
names(cluster_layer) <- "cluster"
# my "interventions" dataset is unbounded by the "megatrends with interventions" dataset.
# I need to bound it to be comparable to Pete's
# (and to explore the difference between bounded and unbounded residual transmission)
# TODO: find reason for differences between my and Pete's bounded rasters.
# Pixel-wise minimum of the two surfaces.
bounded_interventions <-min(stack(interventions,megatrends_ints))
names(bounded_interventions) <- "bounded_interventions"
megatrends_diff <- interventions - bounded_interventions
# NOTE(review): brewer.pal() belongs to RColorBrewer, which is never attached
# in this script -- confirm it is on the search path at run time.
pal <- c("#e0e0e0", rev(brewer.pal(11, "Spectral")))
breaks <- c(0, seq(0.01, 1, length.out=11))
levelplot(stack(bounded_interventions, interventions_pete), par.settings=rasterTheme(pal), at=breaks, xlab=NULL, ylab=NULL, margin=F, scales=list(draw=F))
# compare my bounded intervention to Pete's
abv_pg_diff <- bounded_interventions - interventions_pete
plot(abv_pg_diff>0.01 | abv_pg_diff<(-0.01), main=">1% difference between ABV and PG")
# spatially disaggregate pixels that track lookup table vs megatrends
# for_residplot: full cluster map with one pixel zeroed (presumably so the
# zero level, and hence the grey colour, appears in the legend -- confirm);
# resid_clust: cluster id only where transmission persists under the bounded
# intervention surface.
for_residplot <- copy(cluster_layer)
for_residplot[1] <- 0
resid_clust <- copy(cluster_layer)
resid_clust[bounded_interventions<cutoff_pr] <- 0
stacked <- stack(ratify(for_residplot), ratify(resid_clust))
names(stacked) <- c("Full Cluster", "Residual Transmission")
pdf(file.path(main_dir, "cluster_plots.pdf"), width=12, height=6)
cluster_plot <- levelplot(stacked, att="ID", col.regions=c("#A9A9A9", colors),
                          xlab=NULL, ylab=NULL, scales=list(draw=F),
                          main = int_label, colorkey=F, margin=F)
print(cluster_plot)
graphics.off()
# explore areas of residual transmission--------------------------
# Flatten a single-layer raster into a long data.table with one row per
# finite cell: id (cell index into the raster's matrix representation),
# value, and type (the raster's layer name), ready for dcast() to wide form.
raster_to_dt <- function(rast){
  vals <- as.matrix(rast)
  # Compute the finite-cell index once; the original evaluated the
  # !is.na & !is.infinite mask twice over the full matrix.
  keep <- which(!is.na(vals) & !is.infinite(vals))
  out <- data.table(id = keep, value = vals[keep])
  out[, type:= names(rast)]
  return(out)
}
# Convert every raster to long form, then cast wide: one row per pixel id
# with one column per raster layer plus the cluster id.
reduction_dt <- lapply(list(interventions, bounded_interventions, megatrends_ints, megatrends_noints, cluster_layer), raster_to_dt)
reduction_dt <- rbindlist(reduction_dt)
reduction_dt <- dcast.data.table(reduction_dt, id ~ type)
reduction_dt <- reduction_dt[complete.cases(reduction_dt)]
# Keep only pixels with transmission in the no-intervention projection.
reduction_dt <- reduction_dt[megatrends_noints>0]
reduction_dt <- merge(reduction_dt, anthro_endo_map, by="cluster", all=T)
reduction_dt[, cluster:=as.factor(cluster)]
# eliminate is 1 only when bounded_interventions is exactly 0 (floor of 1-x
# for x in [0,1]): a pixel-level elimination indicator.
reduction_dt[, eliminate:=floor(1-bounded_interventions)]
# Bin initial (no-intervention) prevalence into 5%-wide classes.
reduction_dt[, init_prev_class:= cut(megatrends_noints, breaks=seq(0,1,0.05),
                                     labels=c("0-5", "5-10", "10-15", "15-20", "20-25", "25-30", "30-35",
                                              "35-40", "40-45", "45-50", "50-55", "55-60", "60-65", "65-70",
                                              "70-75", "75-80", "80-85", "85-90", "90-95", "95-100"))]
# Mean of every column within site x cluster x initial-prevalence class, plus
# a pixel count per cluster x class and a quintile-binned version of it.
summary_dt <- reduction_dt[,lapply(.SD, mean), by=list(sitename, cluster, init_prev_class)]
summary_dt[, human_indoor:=round(human_indoor, 2)]
summary_dt <- merge(summary_dt, reduction_dt[, list(count=.N), by=list(cluster, init_prev_class)],
                    by=c("cluster", "init_prev_class"), all=T)
# NOTE(review): these count_class labels are hard-coded quintile boundaries --
# they will be wrong if the underlying data change; confirm.
summary_dt[, count_class:=cut(count, breaks=quantile(count, probs=seq(0,1,0.2)),
                              labels=c("4-161", "162-1,743", "1,744-7,839", "7,840-14,003", "14,004-59,512"), include.lowest=T)]
# Factor ordered by decreasing indoor-biting value so facets sort high-to-low.
indoor_vals <- sort(unique(summary_dt$human_indoor), decreasing = T)
summary_dt[, human_indoor_factor:= factor(human_indoor, levels=indoor_vals, labels=as.character(indoor_vals))]
# Heatmap of elimination probability by initial prevalence x indoor biting.
elim_plot <- ggplot(summary_dt, aes(x=init_prev_class, y=human_indoor)) +
  geom_point(aes(color=eliminate, size=count_class), shape=15) +
  scale_size_discrete(name="Pixel Count") +
  scale_color_gradientn(colors=brewer.pal(7,"Spectral"), name="Proportion of\nPixels Eliminating") +
  theme(axis.text.x = element_text(angle=45, hjust=1)) +
  labs(x="Initial PfPR (%)",
       y="Indoor Biting (%)",
       title=paste("Probability of Elimination Under", int_label))
# Companion heatmap of mean final prevalence (residual transmission).
resid_plot <- ggplot(summary_dt, aes(x=init_prev_class, y=human_indoor)) +
  geom_point(aes(color=bounded_interventions, size=count_class), shape=15) +
  scale_size_discrete(name="Pixel Count") +
  scale_color_gradientn(colors=rev(brewer.pal(7,"Spectral")), name="Mean Final\nPrevalence") +
  theme(axis.text.x = element_text(angle=45, hjust=1)) +
  labs(x="Initial PfPR (%)",
       y="Indoor Biting (%)",
       title=paste("Residual Transmission Under", int_label))
# plot climate time series next to indoor biting maps
# Monthly covariate summaries per cluster, joined to site names and the
# indoor-biting factor so the panels line up with the heatmaps above.
climate <- fread(file.path(base_dir, "seasonal_classification/africa/kmeans/summary_tsi_rainfall_vector_abundance_6.csv"))
climate <- climate[variable_name=="month"]
climate[, cluster:=as.factor(cluster)]
climate_to_sitename = data.table(sitename=c("aba", "kananga", "kasama", "djibo", "gode", "moine"),
                                 cluster=1:6)
climate <- merge(climate, climate_to_sitename, by="cluster", all=T)
climate <- merge(climate, unique(summary_dt[, list(sitename,human_indoor_factor)]), by="sitename", all=T)
palette <- c("#F98400", "#00A08A", "#5392C2", "#902E57", "#F2AD00", "#D71B5A", "#98B548", "#8971B3")
these_colors <- palette[1:6]
# One faceted time-series plot per covariate in climate$cov.
lines <- lapply(unique(climate$cov), function(cov_value){
  data <- climate[cov==cov_value]
  # Axis heuristic: covariates whose 95th percentile exceeds 1 (rainfall-like)
  # get a 0-600 axis; fraction-like covariates get a 0-0.75 axis, hard-limited
  # only when all values fit below 0.75.
  if(max(data$perc_95)>1){
    ybreaks <- c(0, 600)
    ylabs <- c("0", "600")
    yminorbreaks <- c(100, 200, 300, 400, 500)
    ylimit <- NULL
  }else if(max(data$perc_95)<0.75){
    ybreaks <- c(0, 0.75)
    ylabs <- c("0", "0.75")
    yminorbreaks <- c(0.25, 0.5)
    ylimit <- c(0, 0.75)
  }else{
    ybreaks <- c(0, 0.75)
    ylabs <- c("0", "0.75")
    yminorbreaks <- c(0.25, 0.5)
    ylimit <- NULL
  }
  # Median line, dashed 5th/95th percentile envelope, shaded IQR ribbon.
  ggplot(data, aes(x=as.integer(variable_val), y=median, color=cluster, fill=cluster)) +
    facet_grid(human_indoor_factor~.) +
    geom_line(size=1) +
    geom_line(aes(y=perc_05), size=0.75, linetype=2) +
    geom_line(aes(y=perc_95), size=0.75, linetype=2) +
    geom_ribbon(aes(ymin=perc_25, ymax=perc_75), alpha=0.5, color=NA) +
    scale_color_manual(values = these_colors) +
    scale_fill_manual(values = these_colors) +
    scale_x_continuous(breaks=seq(2,12,2), labels=c("F","A","J","A","O","D"), minor_breaks=seq(1,12,2)) +
    scale_y_continuous(breaks=ybreaks, labels=ylabs, minor_breaks=yminorbreaks, limits=ylimit) +
    theme(legend.position="none",
          plot.title = element_text(size=16),
          strip.background = element_blank(),
          # strip.text.y = element_blank()
    ) +
    # NOTE(review): capitalize() comes from Hmisc, which is never attached in
    # this script -- confirm it is available at run time.
    labs(title=ifelse(nchar(cov_value)==3, toupper(cov_value), capitalize(cov_value)),
         x="",
         y="")
})
# Combined figure: the two heatmaps alongside the per-covariate panels.
pdf(file.path(main_dir, paste0("resid_by_endo_with_climate_", int_type, ".pdf")), height=7, width=20)
grid.arrange(grobs=append(list(elim_plot, resid_plot), lines), layout_matrix=rbind(c(1,1,1,2,2,2,3,4)))
graphics.off()
# Scatter of base-2000 vs base-2016 megatrends prevalence, per archetype.
# NOTE(review): these top-level ggplot expressions rely on R's auto-printing,
# which happens when the script is run directly but not when it is source()d.
png(file.path(main_dir, "megatrend_compare.png"), width=900, height=600)
ggplot(reduction_dt, aes(x=megatrends_noints, y=megatrends_ints)) +
  geom_point(alpha=0.25) +
  facet_wrap(~cluster) +
  geom_abline() +
  theme_minimal() +
  theme(legend.position="none") +
  xlim(0,1) +
  labs(title="Megatrend (Base 2000) vs Megatrend (Base 2016), \n by Transmission Archetype",
       x="Megatrends, Base 2000",
       y="Megatrends, Base 2016")
graphics.off()
# Same x-axis against the bounded intervention surface, pixels with residual
# transmission only.
png(file.path(main_dir, "megatrend_int_impact.png"), width=900, height=600)
ggplot(reduction_dt[bounded_interventions>cutoff_pr], aes(x=megatrends_noints, y=bounded_interventions)) +
  # geom_point(aes(y=megatrends_ints), alpha=0.1) +
  geom_point(aes(color=cluster), alpha=0.5) +
  scale_color_manual(values=colors) +
  facet_wrap(~cluster) +
  geom_abline() +
  theme_minimal() +
  theme(legend.position="none") +
  xlim(0,1) +
  labs(title="Megatrend (Base 2000) vs Megatrend Plus Interventions, \n by Transmission Archetype",
       x="Megatrend, Base 2000",
       y=paste("Megatrend Base 2000 +", int_label))
graphics.off()
|
# test_ISOFeatureType.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for ISOFeatureType.R
#=======================
# Load dependencies with library() rather than require(): require() merely
# returns FALSE when a package is missing, deferring the failure to an opaque
# "could not find function" error later, while library() fails fast.
library(geometa, quietly = TRUE)
library(testthat)
context("ISOFeatureType")
test_that("encoding",{
  testthat::skip_on_cran()

  # Build a feature type carrying three attributes, each with a GML base
  # unit and two listed values, then round-trip it through XML.
  ft <- ISOFeatureType$new()
  ft$setTypeName("typeName")
  ft$setDefinition("definition")
  ft$setCode("code")
  ft$setIsAbstract(FALSE)
  for(alias in c("alias1", "alias2")){
    ft$addAlias(alias)
  }

  for(i in seq_len(3)){
    # feature attribute i
    attrib <- ISOFeatureAttribute$new()
    attrib$setMemberName(sprintf("name %s", i))
    attrib$setDefinition(sprintf("definition %s", i))
    attrib$setCardinality(lower = 1, upper = 1)
    attrib$setCode(sprintf("code %s", i))

    # measurement unit for the attribute values
    base_unit <- GMLBaseUnit$new(id = sprintf("ID%s", i))
    base_unit$setDescriptionReference("someref")
    base_unit$setIdentifier("identifier", "codespace")
    base_unit$addName("name1", "codespace")
    base_unit$addName("name2", "codespace")
    base_unit$setQuantityTypeReference("someref")
    base_unit$setCatalogSymbol("symbol")
    base_unit$setUnitsSystem("somelink")
    attrib$setValueMeasurementUnit(base_unit)

    # two listed values per attribute
    for(j in 1:2){
      lv <- ISOListedValue$new()
      lv$setCode(paste0("code", j))
      lv$setLabel(paste0("label", j))
      lv$setDefinition(paste0("definition", j))
      attrib$addListedValue(lv)
    }
    attrib$setValueType("typeName")

    # attach the attribute as a carrierOfCharacteristics
    ft$addCharacteristic(attrib)
  }

  expect_is(ft, "ISOFeatureType")
  xml <- ft$encode()
  expect_is(xml, "XMLInternalNode")

  # decode the XML back and check both representations are equivalent
  decoded <- ISOFeatureType$new(xml = xml)
  xml2 <- decoded$encode()
  expect_true(ISOAbstractObject$compare(ft, decoded))
})
# Same XML round-trip as the previous test, but exercising multilingual
# (locale) variants of definitions, labels and names.
# NOTE(review): this test_that() reuses the description "encoding" from the
# previous test; a distinct name (e.g. "encoding-i18n") would be clearer.
test_that("encoding",{
  testthat::skip_on_cran()
  #featuretype
  md <- ISOFeatureType$new()
  md$setTypeName("typeName")
  md$setDefinition(
    "description",
    locales = list(
      EN = "the description",
      FR = "la description",
      ES = "la descripción",
      AR = "الوصف",
      RU = "описание",
      ZH = "描述"
    )
  )
  md$setCode("code")
  md$setIsAbstract(FALSE)
  md$addAlias("alias1")
  md$addAlias("alias2")
  #add feature attributes
  for(i in 1:3){
    #create attribute
    # NOTE(review): the AR pattern below contains what looks like a stray ')'
    # and the attribute code is constant ("code 1") across iterations, unlike
    # the sibling test which uses sprintf("code %s", i) -- confirm intent.
    fat <- ISOFeatureAttribute$new()
    fat$setMemberName(sprintf("name%s",i))
    fat$setDefinition(
      sprintf("description %s",i),
      locales = list(
        EN = sprintf("the description %s",i),
        FR = sprintf("la description %s",i),
        ES = sprintf("la descripción %s",i),
        AR = sprintf("%s الوصف)",i),
        RU = sprintf("описание %s",i),
        ZH = sprintf("描述 %s",i)
      )
    )
    fat$setCardinality(lower=1,upper=1)
    fat$setCode("code 1")
    #add measurement unit
    gml <- GMLBaseUnit$new(id = sprintf("ID%s",i))
    gml$setDescriptionReference("someref")
    gml$setIdentifier("identifier", "codespace")
    gml$addName("name1", "codespace")
    gml$addName("name2", "codespace")
    gml$setQuantityTypeReference("someref")
    gml$setCatalogSymbol("symbol")
    gml$setUnitsSystem("somelink")
    fat$setValueMeasurementUnit(gml)
    #add listed values (with localized labels and definitions)
    val1 <- ISOListedValue$new()
    val1$setCode("code1")
    val1$setLabel(
      "name in english 1",
      locales = list(
        EN = "name in english 1",
        FR = "nom en français 1",
        ES = "Nombre en español 1",
        AR = "1 الاسم باللغة العربية",
        RU = "имя на русском 1",
        ZH = "中文名 1"
      ))
    val1$setDefinition(
      "definition in english 1",
      locales = list(
        EN = "definition in english 1",
        FR = "définition en français 1",
        ES = "definición en español 1",
        AR = "1 التعريف باللغة العربية ",
        RU = "Русское определение 1",
        ZH = "中文定义1"
      ))
    fat$addListedValue(val1)
    val2 <- ISOListedValue$new()
    val2$setCode("code2")
    val2$setLabel(
      "name in english 2",
      locales = list(
        EN = "name in english 2",
        FR = "nom en français 2",
        ES = "Nombre en español 2",
        AR = "2 الاسم باللغة العربية",
        RU = "имя на русском 2",
        ZH = "中文名 2"
      ))
    val2$setDefinition(
      "definition in english 2",
      locales = list(
        EN = "definition in english 2",
        FR = "définition en français 2",
        ES = "definición en español 2",
        AR = "2 التعريف باللغة العربية ",
        RU = "Русское определение 2",
        ZH = "中文定义2"
      ))
    fat$addListedValue(val2)
    fat$setValueType("typeName")
    #add feature attribute as carrierOfCharacteristic
    md$addCharacteristic(fat)
  }
  expect_is(md, "ISOFeatureType")
  xml <- md$encode()
  expect_is(xml, "XMLInternalNode")
  #decoding: parse the XML back and verify equivalence with the original
  md2 <- ISOFeatureType$new(xml = xml)
  xml2 <- md2$encode()
  expect_true(ISOAbstractObject$compare(md, md2))
}) | /tests/testthat/test_ISOFeatureType.R | no_license | cran/geometa | R | false | false | 5,366 | r | # test_ISOFeatureType.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for ISOFeatureType.R
#=======================
# Load dependencies with library() rather than require(): require() merely
# returns FALSE when a package is missing, deferring the failure to an opaque
# "could not find function" error later, while library() fails fast.
library(geometa, quietly = TRUE)
library(testthat)
context("ISOFeatureType")
test_that("encoding",{
  testthat::skip_on_cran()

  # Build a feature type carrying three attributes, each with a GML base
  # unit and two listed values, then round-trip it through XML.
  ft <- ISOFeatureType$new()
  ft$setTypeName("typeName")
  ft$setDefinition("definition")
  ft$setCode("code")
  ft$setIsAbstract(FALSE)
  for(alias in c("alias1", "alias2")){
    ft$addAlias(alias)
  }

  for(i in seq_len(3)){
    # feature attribute i
    attrib <- ISOFeatureAttribute$new()
    attrib$setMemberName(sprintf("name %s", i))
    attrib$setDefinition(sprintf("definition %s", i))
    attrib$setCardinality(lower = 1, upper = 1)
    attrib$setCode(sprintf("code %s", i))

    # measurement unit for the attribute values
    base_unit <- GMLBaseUnit$new(id = sprintf("ID%s", i))
    base_unit$setDescriptionReference("someref")
    base_unit$setIdentifier("identifier", "codespace")
    base_unit$addName("name1", "codespace")
    base_unit$addName("name2", "codespace")
    base_unit$setQuantityTypeReference("someref")
    base_unit$setCatalogSymbol("symbol")
    base_unit$setUnitsSystem("somelink")
    attrib$setValueMeasurementUnit(base_unit)

    # two listed values per attribute
    for(j in 1:2){
      lv <- ISOListedValue$new()
      lv$setCode(paste0("code", j))
      lv$setLabel(paste0("label", j))
      lv$setDefinition(paste0("definition", j))
      attrib$addListedValue(lv)
    }
    attrib$setValueType("typeName")

    # attach the attribute as a carrierOfCharacteristics
    ft$addCharacteristic(attrib)
  }

  expect_is(ft, "ISOFeatureType")
  xml <- ft$encode()
  expect_is(xml, "XMLInternalNode")

  # decode the XML back and check both representations are equivalent
  decoded <- ISOFeatureType$new(xml = xml)
  xml2 <- decoded$encode()
  expect_true(ISOAbstractObject$compare(ft, decoded))
})
# Same XML round-trip as the previous test, but exercising multilingual
# (locale) variants of definitions, labels and names.
# NOTE(review): this test_that() reuses the description "encoding" from the
# previous test; a distinct name (e.g. "encoding-i18n") would be clearer.
test_that("encoding",{
  testthat::skip_on_cran()
  #featuretype
  md <- ISOFeatureType$new()
  md$setTypeName("typeName")
  md$setDefinition(
    "description",
    locales = list(
      EN = "the description",
      FR = "la description",
      ES = "la descripción",
      AR = "الوصف",
      RU = "описание",
      ZH = "描述"
    )
  )
  md$setCode("code")
  md$setIsAbstract(FALSE)
  md$addAlias("alias1")
  md$addAlias("alias2")
  #add feature attributes
  for(i in 1:3){
    #create attribute
    # NOTE(review): the AR pattern below contains what looks like a stray ')'
    # and the attribute code is constant ("code 1") across iterations, unlike
    # the sibling test which uses sprintf("code %s", i) -- confirm intent.
    fat <- ISOFeatureAttribute$new()
    fat$setMemberName(sprintf("name%s",i))
    fat$setDefinition(
      sprintf("description %s",i),
      locales = list(
        EN = sprintf("the description %s",i),
        FR = sprintf("la description %s",i),
        ES = sprintf("la descripción %s",i),
        AR = sprintf("%s الوصف)",i),
        RU = sprintf("описание %s",i),
        ZH = sprintf("描述 %s",i)
      )
    )
    fat$setCardinality(lower=1,upper=1)
    fat$setCode("code 1")
    #add measurement unit
    gml <- GMLBaseUnit$new(id = sprintf("ID%s",i))
    gml$setDescriptionReference("someref")
    gml$setIdentifier("identifier", "codespace")
    gml$addName("name1", "codespace")
    gml$addName("name2", "codespace")
    gml$setQuantityTypeReference("someref")
    gml$setCatalogSymbol("symbol")
    gml$setUnitsSystem("somelink")
    fat$setValueMeasurementUnit(gml)
    #add listed values (with localized labels and definitions)
    val1 <- ISOListedValue$new()
    val1$setCode("code1")
    val1$setLabel(
      "name in english 1",
      locales = list(
        EN = "name in english 1",
        FR = "nom en français 1",
        ES = "Nombre en español 1",
        AR = "1 الاسم باللغة العربية",
        RU = "имя на русском 1",
        ZH = "中文名 1"
      ))
    val1$setDefinition(
      "definition in english 1",
      locales = list(
        EN = "definition in english 1",
        FR = "définition en français 1",
        ES = "definición en español 1",
        AR = "1 التعريف باللغة العربية ",
        RU = "Русское определение 1",
        ZH = "中文定义1"
      ))
    fat$addListedValue(val1)
    val2 <- ISOListedValue$new()
    val2$setCode("code2")
    val2$setLabel(
      "name in english 2",
      locales = list(
        EN = "name in english 2",
        FR = "nom en français 2",
        ES = "Nombre en español 2",
        AR = "2 الاسم باللغة العربية",
        RU = "имя на русском 2",
        ZH = "中文名 2"
      ))
    val2$setDefinition(
      "definition in english 2",
      locales = list(
        EN = "definition in english 2",
        FR = "définition en français 2",
        ES = "definición en español 2",
        AR = "2 التعريف باللغة العربية ",
        RU = "Русское определение 2",
        ZH = "中文定义2"
      ))
    fat$addListedValue(val2)
    fat$setValueType("typeName")
    #add feature attribute as carrierOfCharacteristic
    md$addCharacteristic(fat)
  }
  expect_is(md, "ISOFeatureType")
  xml <- md$encode()
  expect_is(xml, "XMLInternalNode")
  #decoding: parse the XML back and verify equivalence with the original
  md2 <- ISOFeatureType$new(xml = xml)
  xml2 <- md2$encode()
  expect_true(ISOAbstractObject$compare(md, md2))
})
#' Prepare a parameter-long data frame for statistical analysis
#'
#' @param .data Data in parameter-long form
#' @param key The column name that contains the column names of the data matrix
#' @param value The column name that contains the values
#' @param qualifiers Columns that add context to observations (e.g., depth, zone, core)
#' @param fill If a key/value combination doesn't exist in the input, this value will be
#' assigned in the data matrix. Generally, using NA for geochemical data and 0 for relative
#' abundance data is advised.
#' @param groups Use [group_by][dplyr::group_by] or this argument to group by one or more columns (e.g., core or lake)
#' @param select_if Use `~TRUE` to keep all columns; use `~all(is.finite(.))` to keep columns
#' with all finite values. See [select_if][dplyr::select_if].
#' @param filter_all Use `any_vars(TRUE)` to keep all observations; use `all_vars(is.finite(.))` to
#' keep only observations with finite (non-missing) values. See [filter_all][dplyr::filter_all].
#' @param trans A function that will be applied to all columns, column-wise. Use [identity][base::identity]
#' to perform no transformation, use [scale][base::scale] to scale each column to a mean of zero and
#' variance of 1. See [mutate_all][dplyr::mutate_all].
#' @param ... One or more columns to unnest.
#'
#' @return A nested data matrix, which is composed of a [tibble::tibble()]
#' with tibble list-columns `data`, `discarded_rows`, `discarded_columns`, and
#' `qualifiers`.
#' @export
#'
#' @examples
#' nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' @importFrom dplyr any_vars all_vars
#' @importFrom rlang enquo quos !! !!!
#'
nested_data <- function(.data, qualifiers = NULL, key = NULL, value, fill = NA,
                        select_if = ~TRUE, filter_all = any_vars(TRUE), trans = identity,
                        groups = NULL) {
  stopifnot(
    is.data.frame(.data)
  )
  # Capture the column-selection arguments for tidy evaluation below.
  groups <- enquo(groups)
  qualifiers <- enquo(qualifiers)
  key <- enquo(key)
  value <- enquo(value)
  # Fall back to any groups already set via dplyr::group_by() when `groups`
  # is not supplied explicitly.
  if(rlang::quo_is_null(groups)) {
    groups <- dplyr::group_vars(.data)
  }
  # this makes sure all args refer to valid columns, enables checking names later
  group_vars <- unname(tidyselect::vars_select(colnames(.data), !!groups))
  qualifier_vars <- unname(tidyselect::vars_select(colnames(.data), !!qualifiers))
  key_vars <- unname(tidyselect::vars_select(colnames(.data), !!key))
  value_vars <- unname(tidyselect::vars_select(colnames(.data), !!value))
  # key, qualifier, and group vars can't intersect
  stopifnot(
    length(intersect(group_vars, qualifier_vars)) == 0,
    length(intersect(group_vars, key_vars)) == 0,
    length(intersect(qualifier_vars, key_vars)) == 0
  )
  # value vars are value_vars minus all others (facilitates use of tidyselect helpers)
  value_vars <- setdiff(value_vars, c(group_vars, qualifier_vars, key_vars))
  if(length(value_vars) == 0) {
    stop(">0 value columns must be specified")
  } else if(length(value_vars) == 1) {
    # Long (key/value) input: exactly one key column names the variables.
    if(length(key_vars) != 1) stop("Using a single `value` column, exactly one column must be specified by `key`")
    # variables as distinct values of `key`
    variables <- dplyr::distinct(
      dplyr::select(
        dplyr::ungroup(.data),
        !!key
      )
    )[[1]]
    # makes sure factor keys stay in order later
    variables <- as.character(sort(variables))
  } else {
    # ignore key_vars and fill if wide data is specified
    if(length(key_vars) > 0) message("Ignoring variables specified in `key` because more than one `value` column was specified")
    key_vars <- character(0)
    if(!identical(fill, NA)) message("Ignoring `fill`")
    variables <- character(0)
  }
  # checking column names here to make sure everything can be unnest()ed
  # without name conflicts
  reserved_names <- c(
    "discarded_columns", "discarded_rows", "qualifiers",
    "data", "df_original"
  )
  # col names can't be reserved, and can't be identical to any of the variables
  check_problematic_names(c(group_vars, qualifier_vars, key_vars, value_vars), c(reserved_names, variables))
  # variables can't be reserved names either, because they become columns eventually (if they aren't already)
  check_problematic_names(variables, reserved_names, data_name = key_vars)
  data <- dplyr::select(
    dplyr::ungroup(.data),
    !!group_vars, !!qualifier_vars, !!key_vars, !!value_vars
  )
  # Nest one data frame per group; each group is processed independently below.
  grouped <- dplyr::group_by_at(data, dplyr::vars(!!group_vars))
  nested <- tidyr::nest(grouped, df_original = c(!!qualifier_vars, !!key_vars, !!value_vars))
  output <- purrr::map(nested$df_original, function(df) {
    # spread if there is one value var
    if(length(value_vars) == 1) {
      df <- tidyr::spread(df, !!key_vars, !!value_vars, fill = fill)
      # some "variables" aren't present in all groups
      value_vars <- intersect(variables, colnames(df))
    }
    # select -> filter
    data_df <- dplyr::select(df, !!value_vars)
    data_df_selected <- dplyr::select_if(data_df, select_if)
    deselected_names <- setdiff(colnames(data_df), colnames(data_df_selected))
    # deselect names that were identified by the previous operation
    selected_df <- dplyr::select(df, -!!deselected_names)
    discarded_columns <- dplyr::select(data_df, !!deselected_names)
    value_vars <- setdiff(value_vars, deselected_names)
    # only filter at value variables
    filtered_df <- dplyr::filter_at(selected_df, dplyr::vars(!!value_vars), filter_all)
    discarded_rows <- dplyr::setdiff(selected_df, filtered_df)
    # only transform at value variables
    trans_df <- dplyr::mutate_at(filtered_df, dplyr::vars(!!value_vars), trans)
    # qualifier vars should always have at least something (row number)
    qualifiers <- dplyr::select(trans_df, !!qualifier_vars)
    qualifiers$row_number <- seq_len(nrow(qualifiers))
    # One result list per group; these become the list-columns of the output.
    list(
      discarded_columns = discarded_columns,
      discarded_rows = discarded_rows,
      qualifiers = qualifiers,
      data = dplyr::select(trans_df, -!!qualifier_vars)
    )
  })
  # Reassemble: group identifier columns + the transposed per-group lists as
  # list-columns (discarded_columns, discarded_rows, qualifiers, data).
  new_nested_data(
    tibble::as_tibble(
      dplyr::bind_cols(
        dplyr::select(
          dplyr::ungroup(nested),
          !!group_vars
        ),
        tibble::as_tibble(
          purrr::transpose(output)
        )
      )
    )
  )
}
#' @rdname nested_data
#' @export
unnested_data <- function(.data, ...) {
  # Columns to unnest, captured for tidy evaluation.
  unnest_cols <- rlang::quos(...)
  # Keep every non-list column for context alongside the unnested columns.
  plain_cols <- names(.data)[!vapply(.data, is.list, logical(1))]
  selected <- dplyr::select(.data, !!plain_cols, !!!unnest_cols)
  tidyr::unnest(selected, c(!!!unnest_cols))
}
#' Perform an analysis on a nested data matrix
#'
#' @param .data A data frame with a list column of data frames, possibly created using
#' [nested_data].
#' @param .fun A model function
#' @param .reserved_names Names that should not be allowed as columns in any
#' data frame within this object
#' @param .output_column A column name in which the output of .fun should be stored.
#' @param .env Passed to [as_function][rlang::as_function]
#' @param ... Passed to fun
#'
#' @return .data with an additional list column of fun output
#' @export
#'
#' @examples
#' nd <- nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' na <- nested_analysis(nd, vegan::rda, X = data)
#' plot(na)
#'
nested_analysis <- function(.data, .fun, ..., .output_column = "model", .reserved_names = NULL, .env = parent.frame()) {
  # allow lambda-style functions (e.g., ~ .x + 1)
  .fun <- rlang::as_function(.fun, env = .env)
  # args get evaluated 'tidily' within the transposed data,
  # so they can refer to columns in the nested_analysis data frame
  more_args <- rlang::quos(...)
  # Scalar && (not vectorized &): short-circuits and guarantees a length-1
  # condition even if .output_column were accidentally a longer vector.
  stopifnot(
    is.null(.output_column) || (is.character(.output_column) && length(.output_column) == 1)
  )
  # column names can't be reserved names in .data or in nested data columns
  check_problematic_names(colnames(.data), c(.reserved_names, .output_column))
  # Walk each list-column and validate names of any nested data frames.
  # The map() results are discarded; this runs purely for its side effect
  # (check_problematic_names() errors on a bad name).
  purrr::map(colnames(.data), function(col_name) {
    col <- .data[[col_name]]
    if(is.list(col)) {
      purrr::map(col, function(list_item) {
        if(is.data.frame(list_item)) {
          check_problematic_names(colnames(list_item), c(.reserved_names, .output_column), data_name = col_name)
        }
      })
    }
  })
  # Call .fun once per row, evaluating the quoted arguments against that row
  # so they can reference the row's (list-)columns by name.
  result <- purrr::map(purrr::transpose(.data), function(row) {
    args <- purrr::map(more_args, function(arg_q) rlang::eval_tidy(arg_q, data = row))
    do.call(.fun, args)
  })
  if(!is.null(.output_column)) {
    .data[[.output_column]] <- result
  }
  new_nested_analysis(.data)
}
# Low-level constructor: tag x as "nested_data", optionally prepending
# subclasses, deduplicating in case x already carries any of them.
new_nested_data <- function(x, subclasses = character(0)) {
  class(x) <- unique(c(subclasses, "nested_data", class(x)))
  x
}
# Low-level constructor: tag x as "nested_analysis", optionally prepending
# subclasses, deduplicating in case x already carries any of them.
new_nested_analysis <- function(x, subclasses = character(0)) {
  class(x) <- unique(c(subclasses, "nested_analysis", class(x)))
  x
}
# dynamically exported in zzz.R
filter.nested_data <- function(.data, ...) {
  # dplyr's filter method strips custom classes; reapply the original
  # class vector to the result.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
filter.nested_analysis <- function(.data, ...) {
  # dplyr's filter method strips custom classes; reapply the original
  # class vector to the result.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
slice.nested_data <- function(.data, ...) {
  # Restore the custom class vector that dplyr's slice method drops.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
slice.nested_analysis <- function(.data, ...) {
  # Restore the custom class vector that dplyr's slice method drops.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
arrange.nested_data <- function(.data, ...) {
  # Reordering rows keeps the structure intact, so the original class
  # vector is reapplied to dplyr's result.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
arrange.nested_analysis <- function(.data, ...) {
  # Reordering rows keeps the structure intact, so the original class
  # vector is reapplied to dplyr's result.
  structure(NextMethod(), class = class(.data))
}
# dynamically exported in zzz.R
mutate.nested_data <- function(.data, ...) {
  original_class <- class(.data)
  result <- NextMethod()
  # only maintain class if no columns were dropped
  # it's possible that the user corrupts an existing column here
  # but this is probably rare
  kept_all_columns <- all(colnames(.data) %in% colnames(result))
  if (!kept_all_columns) {
    original_class <- setdiff(original_class, "nested_data")
  }
  structure(result, class = original_class)
}
# dynamically exported in zzz.R
mutate.nested_analysis <- function(.data, ...) {
  original_class <- class(.data)
  result <- NextMethod()
  # only maintain class if no columns were dropped
  # it's possible that the user corrupts an existing column here
  # but this is probably rare
  kept_all_columns <- all(colnames(.data) %in% colnames(result))
  if (!kept_all_columns) {
    original_class <- setdiff(original_class, "nested_analysis")
  }
  structure(result, class = original_class)
}
#' Plot a nested analysis
#'
#' Calls [plot][graphics::plot] or another (base) plotting function on all models, arranging the output in subplots.
#'
#' @param x,.x A [nested_analysis] object (or subclass)
#' @param .fun A function that produces graphical output
#' @param main The plot title
#' @param ... Passed to the plot function. Tidy evaluation is supported, and arguments are evaluated
#' within a transposed version of x for each row.
#' @param nrow,ncol Force a number of rows or columns in the output
#' @param .model_column The column containing the model
#' @param .output_column The column in which the output of the plot function should be placed
#'
#' @return the input, invisibly
#' @export
#'
#' @examples
#' nd <- nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' na <- nested_analysis(nd, vegan::rda, X = data)
#' plot(na)
#'
#' @importFrom graphics plot
#'
plot.nested_analysis <- function(x, ..., main = "", nrow = NULL, ncol = NULL) {
  # Quote `main` so it can be evaluated per row by plot_nested_analysis().
  main_quo <- enquo(main)
  plot_nested_analysis(
    x,
    .fun = graphics::plot,
    ...,
    main = !!main_quo,
    nrow = nrow,
    ncol = ncol
  )
}
#' @rdname plot.nested_analysis
#' @export
plot_nested_analysis <- function(.x, .fun, ..., nrow = NULL, ncol = NULL, .model_column = .data$model, .output_column = NULL) {
  # Capture the model-column expression for tidy evaluation per row.
  .model_column <- enquo(.model_column)
  # handle zero rows quietly
  if(nrow(.x) == 0) return(.x)
  # Choose a subplot grid sized for one panel per row of .x.
  dims <- wrap_dims(nrow(.x), nrow, ncol)
  invisible(
    withr::with_par(list(mfrow = dims), {
      # nested_analysis() calls .fun once per row; with par(mfrow) set, each
      # call draws into the next grid cell. withr restores par() afterwards.
      nested_analysis(.x, .fun, !!.model_column, ..., .output_column = .output_column)
    })
  )
}
# I ripped this off of ggplot2 to see how it was done...hard to write it any better
wrap_dims <- function(n, nrow = NULL, ncol = NULL) {
if (is.null(ncol) && is.null(nrow)) {
default_row_col <- grDevices::n2mfrow(n)
nrow <- default_row_col[2]
ncol <- default_row_col[1]
} else if (is.null(ncol)) {
ncol <- ceiling(n / nrow)
} else if (is.null(nrow)) {
nrow <- ceiling(n / ncol)
}
c(nrow, ncol)
}
# Return the grouping variables of a nested data matrix: the non-list
# columns that appear before the "data" list-column.
get_grouping_vars <- function(ndm) {
  # everything that isn't a list column before "data"
  data_loc <- which(colnames(ndm) == "data")[1]
  # BUG FIX: the message previously referred to 'wide_df', but the column
  # actually checked for is "data".
  if(is.na(data_loc)) stop("'data' column was not found in the nested data matrix")
  # base vapply() replaces purrr::map_lgl(): same result, no extra dependency
  list_cols <- colnames(ndm)[vapply(ndm, is.list, logical(1))]
  possible_names <- colnames(ndm)[seq_len(data_loc - 1)]
  setdiff(possible_names, list_cols)
}
# Signal (via `action`, stop() by default) if any of `col_names` collides
# with the reserved `bad_names`. `data_name` labels the offending object in
# the message. Returns NULL invisibly when no names collide.
check_problematic_names <- function(col_names, bad_names, data_name = ".data", action = stop) {
  bad_names_in_df <- intersect(col_names, bad_names)
  if(length(bad_names_in_df) > 0) {
    action(
      # BUG FIX: message previously read "are reserved must be renamed"
      "The following names in ", data_name, " are reserved and must be renamed: ",
      paste0("`", bad_names_in_df, "`", collapse = ", ")
    )
  }
}
# Drop all list-columns from x, keeping any names listed in `except`.
drop_list_cols <- function(x, except = character(0)) {
  is_list_col <- vapply(x, is.list, logical(1))
  droppable <- setdiff(names(x)[is_list_col], except)
  x[setdiff(names(x), droppable)]
}
| /R/nested_data_matrix.R | permissive | paleolimbot/tidypaleo | R | false | false | 13,782 | r |
#' Prepare a parameter-long data frame for statistical analysis
#'
#' @param .data Data in parameter-long form
#' @param key The column name that contains the column names of the data matrix
#' @param value The column name that contains the values
#' @param qualifiers Columns that add context to observations (e.g., depth, zone, core)
#' @param fill If a key/value combination doesn't exist in the input, this value will be
#' assigned in the data matrix. Generally, using NA for geochemical data and 0 for relative
#' abundance data is advised.
#' @param groups Use [group_by][dplyr::group_by] or this argument to group by one or more columns (e.g., core or lake)
#' @param select_if Use `~TRUE` to keep all columns; use `~all(is.finite(.))` to keep columns
#' with all finite values. See [select_if][dplyr::select_if].
#' @param filter_all Use `any_vars(TRUE)` to keep all observations; use `all_vars(is.finite(.))` to
#' keep only observations with finite (non-missing) values. See [filter_all][dplyr::filter_all].
#' @param trans A function that will be applied to all columns, column-wise. Use [identity][base::identity]
#' to perform no transformation, use [scale][base::scale] to scale each column to a mean of zero and
#' variance of 1. See [mutate_all][dplyr::mutate_all].
#' @param ... One or more columns to unnest.
#'
#' @return A nested data matrix, which is composed of a [tibble::tibble()]
#' with tibble list-columns `data`, `discarded_rows`, `discarded_columns`, and
#' `qualifiers`.
#' @export
#'
#' @examples
#' nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' @importFrom dplyr any_vars all_vars
#' @importFrom rlang enquo quos !! !!!
#'
nested_data <- function(.data, qualifiers = NULL, key = NULL, value, fill = NA,
select_if = ~TRUE, filter_all = any_vars(TRUE), trans = identity,
groups = NULL) {
stopifnot(
is.data.frame(.data)
)
groups <- enquo(groups)
qualifiers <- enquo(qualifiers)
key <- enquo(key)
value <- enquo(value)
if(rlang::quo_is_null(groups)) {
groups <- dplyr::group_vars(.data)
}
# this makes sure all args refer to valid columns, enables checking names later
group_vars <- unname(tidyselect::vars_select(colnames(.data), !!groups))
qualifier_vars <- unname(tidyselect::vars_select(colnames(.data), !!qualifiers))
key_vars <- unname(tidyselect::vars_select(colnames(.data), !!key))
value_vars <- unname(tidyselect::vars_select(colnames(.data), !!value))
# key, qualifier, and group vars can't intersect
stopifnot(
length(intersect(group_vars, qualifier_vars)) == 0,
length(intersect(group_vars, key_vars)) == 0,
length(intersect(qualifier_vars, key_vars)) == 0
)
# value vars are value_vars minus all others (facilitates use of tidyselect helpers)
value_vars <- setdiff(value_vars, c(group_vars, qualifier_vars, key_vars))
if(length(value_vars) == 0) {
stop(">0 value columns must be specified")
} else if(length(value_vars) == 1) {
if(length(key_vars) != 1) stop("Using a single `value` column, exactly one column must be specified by `key`")
# variables as distinct values of `key`
variables <- dplyr::distinct(
dplyr::select(
dplyr::ungroup(.data),
!!key
)
)[[1]]
# makes sure factor keys stay in order later
variables <- as.character(sort(variables))
} else {
# ignore key_vars and fill if wide data is specified
if(length(key_vars) > 0) message("Ignoring variables specified in `key` because more than one `value` column was specified")
key_vars <- character(0)
if(!identical(fill, NA)) message("Ignoring `fill`")
variables <- character(0)
}
# checking column names here to make sure everything can be unnest()ed
# without name conflicts
reserved_names <- c(
"discarded_columns", "discarded_rows", "qualifiers",
"data", "df_original"
)
# col names can't be reserved, and can't be identical to any of the variables
check_problematic_names(c(group_vars, qualifier_vars, key_vars, value_vars), c(reserved_names, variables))
# variables can't be reserved names either, because they become columns eventually (if they aren't already)
check_problematic_names(variables, reserved_names, data_name = key_vars)
data <- dplyr::select(
dplyr::ungroup(.data),
!!group_vars, !!qualifier_vars, !!key_vars, !!value_vars
)
grouped <- dplyr::group_by_at(data, dplyr::vars(!!group_vars))
nested <- tidyr::nest(grouped, df_original = c(!!qualifier_vars, !!key_vars, !!value_vars))
output <- purrr::map(nested$df_original, function(df) {
# spread if there is one value var
if(length(value_vars) == 1) {
df <- tidyr::spread(df, !!key_vars, !!value_vars, fill = fill)
# some "variables" aren't present in all groups
value_vars <- intersect(variables, colnames(df))
}
# select -> filter
data_df <- dplyr::select(df, !!value_vars)
data_df_selected <- dplyr::select_if(data_df, select_if)
deselected_names <- setdiff(colnames(data_df), colnames(data_df_selected))
# deselect names that were identified by the previous operation
selected_df <- dplyr::select(df, -!!deselected_names)
discarded_columns <- dplyr::select(data_df, !!deselected_names)
value_vars <- setdiff(value_vars, deselected_names)
# only filter at value variables
filtered_df <- dplyr::filter_at(selected_df, dplyr::vars(!!value_vars), filter_all)
discarded_rows <- dplyr::setdiff(selected_df, filtered_df)
# only transform at value variables
trans_df <- dplyr::mutate_at(filtered_df, dplyr::vars(!!value_vars), trans)
# qualifier vars should always have at least something (row number)
qualifiers <- dplyr::select(trans_df, !!qualifier_vars)
qualifiers$row_number <- seq_len(nrow(qualifiers))
list(
discarded_columns = discarded_columns,
discarded_rows = discarded_rows,
qualifiers = qualifiers,
data = dplyr::select(trans_df, -!!qualifier_vars)
)
})
new_nested_data(
tibble::as_tibble(
dplyr::bind_cols(
dplyr::select(
dplyr::ungroup(nested),
!!group_vars
),
tibble::as_tibble(
purrr::transpose(output)
)
)
)
)
}
#' @rdname nested_data
#' @export
unnested_data <- function(.data, ...) {
not_list_cols <- names(.data)[!vapply(.data, is.list, logical(1))]
dots <- rlang::quos(...)
.data <- dplyr::select(.data, !!not_list_cols, !!!dots)
tidyr::unnest(.data, c(!!!dots))
}
#' Perform an analysis on a nested data matrix
#'
#' @param .data A data frame with a list column of data frames, possibly created using
#' [nested_data].
#' @param .fun A model function
#' @param .reserved_names Names that should not be allowed as columns in any
#' data frame within this object
#' @param .output_column A column name in which the output of .fun should be stored.
#' @param .env Passed to [as_function][rlang::as_function]
#' @param ... Passed to fun
#'
#' @return .data with an additional list column of fun output
#' @export
#'
#' @examples
#' nd <- nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' na <- nested_analysis(nd, vegan::rda, X = data)
#' plot(na)
#'
nested_analysis <- function(.data, .fun, ..., .output_column = "model", .reserved_names = NULL, .env = parent.frame()) {
  # allow lambda-style functions (e.g., ~ .x + 1)
  .fun <- rlang::as_function(.fun, env = .env)
  # args get evaluated 'tidily' within the transposed data,
  # so they can refer to columns in the nested_analysis data frame
  more_args <- rlang::quos(...)
  # Scalar && (not vectorized &): short-circuits and guarantees a length-1
  # condition even if .output_column were accidentally a longer vector.
  stopifnot(
    is.null(.output_column) || (is.character(.output_column) && length(.output_column) == 1)
  )
  # column names can't be reserved names in .data or in nested data columns
  check_problematic_names(colnames(.data), c(.reserved_names, .output_column))
  # Walk each list-column and validate names of any nested data frames.
  # The map() results are discarded; this runs purely for its side effect
  # (check_problematic_names() errors on a bad name).
  purrr::map(colnames(.data), function(col_name) {
    col <- .data[[col_name]]
    if(is.list(col)) {
      purrr::map(col, function(list_item) {
        if(is.data.frame(list_item)) {
          check_problematic_names(colnames(list_item), c(.reserved_names, .output_column), data_name = col_name)
        }
      })
    }
  })
  # Call .fun once per row, evaluating the quoted arguments against that row
  # so they can reference the row's (list-)columns by name.
  result <- purrr::map(purrr::transpose(.data), function(row) {
    args <- purrr::map(more_args, function(arg_q) rlang::eval_tidy(arg_q, data = row))
    do.call(.fun, args)
  })
  if(!is.null(.output_column)) {
    .data[[.output_column]] <- result
  }
  new_nested_analysis(.data)
}
new_nested_data <- function(x, subclasses = character(0)) {
structure(x, class = unique(c(subclasses, "nested_data", class(x))))
}
new_nested_analysis <- function(x, subclasses = character(0)) {
structure(x, class = unique(c(subclasses, "nested_analysis", class(x))))
}
# dynamically exported in zzz.R
filter.nested_data <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
filter.nested_analysis <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
slice.nested_data <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
slice.nested_analysis <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
arrange.nested_data <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
arrange.nested_analysis <- function(.data, ...) {
data_class <- class(.data)
structure(NextMethod(), class = data_class)
}
# dynamically exported in zzz.R
mutate.nested_data <- function(.data, ...) {
data_class <- class(.data)
result <- NextMethod()
# only maintain class if no columns were dropped
# it's possible that the user corrupts an existing column here
# but this is probably rare
if(all(colnames(.data) %in% colnames(result))) {
structure(result, class = data_class)
} else {
structure(result, class = setdiff(data_class, "nested_data"))
}
}
# dynamically exported in zzz.R
mutate.nested_analysis <- function(.data, ...) {
data_class <- class(.data)
result <- NextMethod()
# only maintain class if no columns were dropped
# it's possible that the user corrupts an existing column here
# but this is probably rare
if(all(colnames(.data) %in% colnames(result))) {
structure(result, class = data_class)
} else {
structure(result, class = setdiff(data_class, "nested_analysis"))
}
}
#' Plot a nested analysis
#'
#' Calls [plot][graphics::plot] or another (base) plotting function on all models, arranging the output in subplots.
#'
#' @param x,.x A [nested_analysis] object (or subclass)
#' @param .fun A function that produces graphical output
#' @param main The plot title
#' @param ... Passed to the plot function. Tidy evaluation is supported, and arguments are evaluated
#' within a transposed version of x for each row.
#' @param nrow,ncol Force a number of rows or columns in the output
#' @param .model_column The column containing the model
#' @param .output_column The column in which the output of the plot function should be placed
#'
#' @return the input, invisibly
#' @export
#'
#' @examples
#' nd <- nested_data(
#' alta_lake_geochem,
#' qualifiers = c(age, depth, zone),
#' key = param,
#' value = value,
#' trans = scale
#' )
#'
#' na <- nested_analysis(nd, vegan::rda, X = data)
#' plot(na)
#'
#' @importFrom graphics plot
#'
plot.nested_analysis <- function(x, ..., main = "", nrow = NULL, ncol = NULL) {
main <- enquo(main)
plot_nested_analysis(x, .fun = graphics::plot, ..., main = !!main, nrow = nrow, ncol = ncol)
}
#' @rdname plot.nested_analysis
#' @export
plot_nested_analysis <- function(.x, .fun, ..., nrow = NULL, ncol = NULL, .model_column = .data$model, .output_column = NULL) {
.model_column <- enquo(.model_column)
# handle zero rows quietly
if(nrow(.x) == 0) return(.x)
dims <- wrap_dims(nrow(.x), nrow, ncol)
invisible(
withr::with_par(list(mfrow = dims), {
nested_analysis(.x, .fun, !!.model_column, ..., .output_column = .output_column)
})
)
}
# I ripped this off of ggplot2 to see how it was done...hard to write it any better
wrap_dims <- function(n, nrow = NULL, ncol = NULL) {
if (is.null(ncol) && is.null(nrow)) {
default_row_col <- grDevices::n2mfrow(n)
nrow <- default_row_col[2]
ncol <- default_row_col[1]
} else if (is.null(ncol)) {
ncol <- ceiling(n / nrow)
} else if (is.null(nrow)) {
nrow <- ceiling(n / ncol)
}
c(nrow, ncol)
}
# Return the grouping variables of a nested data matrix: the non-list
# columns that appear before the "data" list-column.
get_grouping_vars <- function(ndm) {
  # everything that isn't a list column before "data"
  data_loc <- which(colnames(ndm) == "data")[1]
  # BUG FIX: the message previously referred to 'wide_df', but the column
  # actually checked for is "data".
  if(is.na(data_loc)) stop("'data' column was not found in the nested data matrix")
  # base vapply() replaces purrr::map_lgl(): same result, no extra dependency
  list_cols <- colnames(ndm)[vapply(ndm, is.list, logical(1))]
  possible_names <- colnames(ndm)[seq_len(data_loc - 1)]
  setdiff(possible_names, list_cols)
}
# Signal (via `action`, stop() by default) if any of `col_names` collides
# with the reserved `bad_names`. `data_name` labels the offending object in
# the message. Returns NULL invisibly when no names collide.
check_problematic_names <- function(col_names, bad_names, data_name = ".data", action = stop) {
  bad_names_in_df <- intersect(col_names, bad_names)
  if(length(bad_names_in_df) > 0) {
    action(
      # BUG FIX: message previously read "are reserved must be renamed"
      "The following names in ", data_name, " are reserved and must be renamed: ",
      paste0("`", bad_names_in_df, "`", collapse = ", ")
    )
  }
}
drop_list_cols <- function(x, except = character(0)) {
list_cols <- setdiff(names(x)[vapply(x, is.list, logical(1))], except)
x[setdiff(names(x), list_cols)]
}
|
#' Test whether two vectors have the same length.
#'
#' @param vector1 An arbitrary vector.
#' @param vector2 An arbitrary vector.
#' @return `TRUE` if both vectors have the same number of elements, otherwise `FALSE`.
check_vector_equal_length <- function(vector1, vector2) {
  length(vector1) == length(vector2)
}
#' Test whether `checkVector` matches the length of `refVector` or has length one.
#'
#' @param checkVector The vector whose length is validated.
#' @param refVector The reference vector.
#' @return `TRUE` if `checkVector` is as long as `refVector` or has exactly one element.
check_vector_equal_length_or_one <- function(checkVector, refVector) {
  length(checkVector) %in% c(1L, length(refVector))
}
#' Test whether a vector is NULL or contains any NA or NaN values.
#'
#' @param vector An arbitrary vector (or NULL).
#' @return `TRUE` if `vector` is NULL or contains at least one NA or NaN value.
check_vector_contains_na_or_or_nan_null <- function(vector) {
  # BUG FIX: the original nested `is.nan(vector) || any(is.null(vector))`,
  # handing `||` a vector operand — an error in R >= 4.3 for any input of
  # length > 1. Each elementwise test is now collapsed with any() (scalar)
  # before combining, and the NULL test is a plain is.null().
  is.null(vector) || any(is.na(vector)) || any(is.nan(vector))
}
#' Test whether all elements of a vector are unique.
#'
#' @param vector An arbitrary vector.
#' @return `TRUE` if `vector` contains no duplicated elements.
check_vector_unique <- function(vector){
  # anyDuplicated() returns 0 when nothing repeats, avoiding the
  # intermediate copy that unique() would allocate.
  anyDuplicated(vector) == 0L
}
| /R/inputchecker.R | no_license | KonstantinRK/tempnetwork | R | false | false | 1,272 | r |
#' A function that checks, if the two given vectors have equal length.
#'
#' @param vector1 A arbitrary vector.
#' @param vector2 A arbitrary vector.
#' @return The boolean value TRUE, if the two given vectors have equal length.
check_vector_equal_length <- function(vector1, vector2) {
return(length(vector1) == length(vector2))
}
#' A function that checks, if the 'checkVector' has either equal length as the 'refVector' or is of length 1.
#'
#' @param checkVector The vector to be checked.
#' @param refVector The reference vector.
#' @return The boolean value TRUE, if the 'checkVector' has either equal length as the 'refVector' or is of length 1.
check_vector_equal_length_or_one <- function(checkVector, refVector) {
return(length(checkVector) == length(refVector) || length(checkVector)==1 )
}
#' Test whether a vector is NULL or contains any NA or NaN values.
#'
#' @param vector An arbitrary vector (or NULL).
#' @return `TRUE` if `vector` is NULL or contains at least one NA or NaN value.
check_vector_contains_na_or_or_nan_null <- function(vector) {
  # BUG FIX: the original nested `is.nan(vector) || any(is.null(vector))`,
  # handing `||` a vector operand — an error in R >= 4.3 for any input of
  # length > 1. Each elementwise test is now collapsed with any() (scalar)
  # before combining, and the NULL test is a plain is.null().
  is.null(vector) || any(is.na(vector)) || any(is.nan(vector))
}
check_vector_unique <- function(vector){
return(length(unique(vector))==length(vector))
}
|
## Load the large household power consumption dataset.
filename <- "./data/household_power_consumption.txt"
data <- read.table(filename,
                   header = TRUE,
                   sep = ";",
                   colClasses = c("character", "character", rep("numeric", 7)),
                   # spelled out: `na =` previously relied on partial argument matching
                   na.strings = "?")
dim(data) # 2075259 9
# NOTE(review): attach() is generally discouraged (search-path masking);
# kept because downstream code appears to rely on the attached columns.
attach(data)
## We only need data of 2 days.
# NOTE: `subset` shadows base::subset() here.
subset <- Date == "1/2/2007" | Date == "2/2/2007"
newData <- data[subset, ]
attach(newData)
x <- paste(Date, Time)
newData$DateTime <- strptime(x, "%d/%m/%Y %H:%M:%S")
rownames(newData) <- seq_len(nrow(newData))
dim(newData) # 2880 10
attach(newData) | /load_data.R | no_license | wym007/ExData_Plotting1 | R | false | false | 593 | r | ##load the large dataset.
filename <- "./data/household_power_consumption.txt"
data <- read.table(filename,
                   header = TRUE,
                   sep = ";",
                   colClasses = c("character", "character", rep("numeric", 7)),
                   # spelled out: `na =` previously relied on partial argument matching
                   na.strings = "?")
dim(data) # 2075259 9
# NOTE(review): attach() is generally discouraged (search-path masking);
# kept because downstream code appears to rely on the attached columns.
attach(data)
## We only need data of 2 days.
# NOTE: `subset` shadows base::subset() here.
subset <- Date == "1/2/2007" | Date == "2/2/2007"
newData <- data[subset, ]
attach(newData)
x <- paste(Date, Time)
newData$DateTime <- strptime(x, "%d/%m/%Y %H:%M:%S")
rownames(newData) <- seq_len(nrow(newData))
dim(newData) # 2880 10
attach(newData)
# Push
# Last edited 1/17/2016
# Manny
## NOTES:
# REDUCE SIZE? NEED TO SHORTEN LOAD TIME
# REDUCE AT LEAST ONE OF STRENGTH OR SCORE STATE (GROUP PP/SH OR LEADING/TRAILING)
# KEEP ONLY LINES AND PAIRINGS (SET MIN TOI?)
# Load libraries
library(RSQLite)
library(dplyr)
## Load DB Tables
# Link to database
link <- "~/Documents/dryscrape data/dryscrape.sqlite"
newcon <- dbConnect(SQLite(), link)
# Timing marker; presumably used to report elapsed time later — not visible here.
start <- Sys.time()
# Read tables
# NOTE(review): each table is loaded fully into memory; the connection is
# never closed in this part of the script (no dbDisconnect(newcon) visible).
roster <- dbReadTable(newcon, "roster")
team <- dbReadTable(newcon, "team")
goalie <- dbReadTable(newcon, "goalie")
player <- dbReadTable(newcon, "player")
combo <- dbReadTable(newcon, "combo")
## Aggregate Stats
# Roster
# One row per player/season/season-type; multi-team or multi-number stints
# are collapsed into "/"-separated strings (e.g. "TOR/OTT").
sumroster <- group_by(roster, Full.Name, Season, Season.Type) %>%
  summarise(Team = paste(unique(Team), collapse = "/"), Number = paste(unique(Number), collapse = "/"), Team.Num = paste(unique(Team.Num), collapse = "/"),
            Position = paste(unique(Position), collapse = "/"), Last.Name = first(Last.Name), First.Name = first(First.Name)) %>%
  data.frame()
# Team
# Games played per team/season/season-type, keyed by "Team.Season.Type"
# for the lookup inside the summarise() below.
teamgp <- group_by(team, Team, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
  mutate(Code = paste(Team, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
# `x %in% c(...) == FALSE` parses as `(x %in% c(...)) == FALSE`, i.e.
# !(x %in% c(...)): every state not in the whitelist becomes "XvX".
team$Strength.State[which(team$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Team totals per season/venue/strength-state/score-state, with GP looked
# up from teamgp via the "Team.Season.Type" code.
sumteam <- filter(team, Strength.State != "0v0") %>% group_by(Team, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
  summarise(GP = teamgp$GP[match(paste(first(Team), first(Season), first(Season.Type), sep = "."), teamgp$Code)],
            TOI = sum(TOI), CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA), SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA),
            xGF = sum(xGF), xGA = sum(xGA), ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA),
            AGF = sum(AGF), AGA = sum(AGA), AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
            MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA), OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF),
            DZF = sum(DZF), NZF = sum(NZF), FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA), GVA = sum(GVA), TKA = sum(TKA),
            PENT = sum(PENT), PEND = sum(PEND), DISTF = sum(DISTF), DISTA = sum(DISTA)) %>% data.frame()
# Goalie
# Games played per (goalie, season, season type); Code is the lookup key for the match() below.
goaliegp <- group_by(goalie, Player, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
mutate(Code = paste(Player, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
goalie$Strength.State[which(goalie$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Season totals per (goalie, season, venue, strength state, score category, season type);
# mid-season trades collapse Team into a "/"-separated string.
sumgoalie <- filter(goalie, Strength.State != "0v0") %>% group_by(Player, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
summarise(GP = goaliegp$GP[match(paste(first(Player), first(Season), first(Season.Type), sep = "."), goaliegp$Code)],
Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA), SF = sum(SF), SA = sum(SA),
GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA), ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA),
AGF = sum(AGF), AGA = sum(AGA), AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA), OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF),
DZF = sum(DZF), NZF = sum(NZF), FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA), GVA = sum(GVA), TKA = sum(TKA),
PENT = sum(PENT), PEND = sum(PEND), DISTA = sum(DISTA), G = sum(G), A1 = sum(na.omit(A1)), A2 = sum(na.omit(A2)), SOA = sum(SOA), SOG = sum(SOG),
iPENT = sum(iPENT), iPEND = sum(na.omit(iPEND))) %>% data.frame()
# Player
# Games played per (player, season, season type); Code is the lookup key for the match() below.
playergp <- group_by(player, Player, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
mutate(Code = paste(Player, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
player$Strength.State[which(player$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Skater season totals. "i"-prefixed columns are individual counts, "t"-prefixed are
# team-while-on-ice totals; the mutate() converts t-columns into "O" (off-ice) columns
# by subtracting the on-ice counts, then drops the t-columns.
# NOTE(review): Position lookup matches on Full.Name only (not season/season type) — confirm intended.
sumplayer <- filter(player, Strength.State != "0v0") %>% group_by(Player, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
summarise(GP = playergp$GP[match(paste(first(Player), first(Season), first(Season.Type), sep = "."), playergp$Code)],
Position = sumroster$Position[match(first(Player), sumroster$Full.Name)],
Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), CF = sum(CF), CA = sum(CA), iCF = sum(iCF), FF = sum(FF), FA = sum(FA), iFF = sum(iFF),
SF = sum(SF), SA = sum(SA), iSF = sum(iSF), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA), ixG = sum(ixG),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OTF = sum(OTF), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), iFOW = sum(iFOW), iFOL = sum(iFOL), HF = sum(HF), HA = sum(HA), iHF = sum(iHF), iHA = sum(iHA),
GVA = sum(GVA), TKA = sum(TKA), iGVA = sum(iGVA), iTKA = sum(iTKA), iBLK = sum(iBLK), PENT = sum(PENT), PEND = sum(PEND),
iDIST = sum(iDIST), G = sum(G), A1 = sum(na.omit(A1)), A2 = sum(na.omit(A2)), SOA = sum(SOA), SOG = sum(SOG), iPENT = sum(iPENT), iPEND = sum(na.omit(iPEND)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>% data.frame() %>%
select(-c(tCF:tNZS)) %>%
data.frame()
# Combo
# Group leftover strength states
combo$Strength.State[which(combo$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "3v3", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Forward lines: keep combos where all three skaters play a forward position (C/L/R)
# and the row is not a shootout, then aggregate per (line, season, strength, season
# type, venue). "t"-prefixed columns are team-while-on-ice totals; the mutate()
# converts them to "O" (off-ice) columns by subtracting the line's on-ice counts.
sumline <- filter(combo, grepl("C|L|R", as.character(P3.POS)) == TRUE & grepl("C|L|R", as.character(P2.POS)) == TRUE & grepl("C|L|R", as.character(P1.POS)) == TRUE & Strength.State != "0v0") %>%
group_by(Combo.Code, Season, Strength.State, Season.Type, Venue) %>%
summarise(Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), P1 = first(P1), P1.POS = first(P1.POS),
P2 = first(P2), P2.POS = first(P2.POS), P3 = first(P3), P3.POS = first(P3.POS),
CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA),
SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA),
GVA = sum(GVA), TKA = sum(TKA), PENT = sum(PENT), PEND = sum(PEND),
P1.G = sum(P1.G), P1.A1 = sum(na.omit(P1.A1)), P1.A2 = sum(na.omit(P1.A2)),
P2.G = sum(P2.G), P2.A1 = sum(na.omit(P2.A1)), P2.A2 = sum(na.omit(P2.A2)),
P3.G = sum(P3.G), P3.A1 = sum(na.omit(P3.A1)), P3.A2 = sum(na.omit(P3.A2)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%  # BUG FIX: tDZS was sum(DZS), which made ODZS below always 0
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>%
select(-c(tCF:tMxGA)) %>%  # NOTE(review): unlike sumplayer, keeps tOZS/tDZS/tNZS (sumplayer drops through tNZS) — confirm intended
data.frame()
# Defense pairings: combos with no third skater (P3 == "X") and both remaining
# skaters at D, excluding shootout rows; aggregated per (pairing, season, strength,
# season type, venue). "t"-prefixed columns are team-while-on-ice totals; the
# mutate() converts them to "O" (off-ice) columns by subtracting on-ice counts.
sumpair <- filter(combo, as.character(P3) == "X" & grepl("D", as.character(P2.POS)) == TRUE & grepl("D", as.character(P1.POS)) == TRUE & Strength.State != "0v0") %>%
group_by(Combo.Code, Season, Strength.State, Season.Type, Venue) %>%
summarise(Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI),
P1 = first(P1), P1.POS = first(P1.POS), P2 = first(P2), P2.POS = first(P2.POS),
CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA),
SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA),
GVA = sum(GVA), TKA = sum(TKA), PENT = sum(PENT), PEND = sum(PEND),
P1.G = sum(P1.G), P1.A1 = sum(na.omit(P1.A1)), P1.A2 = sum(na.omit(P1.A2)),
P2.G = sum(P2.G), P2.A1 = sum(na.omit(P2.A1)), P2.A2 = sum(na.omit(P2.A2)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%  # BUG FIX: tDZS was sum(DZS), which made ODZS below always 0
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>%
select(-c(tCF:tMxGA)) %>%  # NOTE(review): keeps tOZS/tDZS/tNZS (sumplayer drops through tNZS) — confirm intended
data.frame()
# ASSIST NETWORK
# Report elapsed wall-clock time for the aggregation pass
end <- Sys.time()
print(end - start)
################################################################################################################################################################################################################
################################################################################################################################################################################################################
################################################################################################################################################################################################################
## Write to Dropbox
# Each summary table is serialized as an .Rda; the CSV writers are kept commented out.
# Roster
# write.csv(sumroster, file = "~/Dropbox/rostertest.csv")
save(sumroster, file = "~/Dropbox/rostertest.Rda")
# Team
# write.csv(sumteam, file = "~/Dropbox/teamtest.csv")
save(sumteam, file = "~/Dropbox/teamtest.Rda")
# Goalie
# write.csv(sumgoalie, file = "~/Dropbox/goalietest.csv")
save(sumgoalie, file = "~/Dropbox/goalietest.Rda")
# Player
# write.csv(sumplayer, file = "~/Dropbox/playertest.csv")
save(sumplayer, file = "~/Dropbox/playertest.Rda")
# Combo
# write.csv(sumcombo, file = "~/Dropbox/combotest.csv")
save(sumpair, file = "~/Dropbox/pairtest.Rda")
save(sumline, file = "~/Dropbox/linetest.Rda")
################################################################################################################################################################################################################
################################################################################################################################################################################################################
################################################################################################################################################################################################################
| /Push.R | no_license | Cophy08/Push | R | false | false | 15,736 | r | # Push
# Last edited 1/17/2016
# Manny
## NOTES:
# REDUCE SIZE? NEED TO SHORTEN LOAD TIME
# REDUCE AT LEAST ONE OF STRENGTH OR SCORE STATE (GROUP PP/SH OR LEADING/TRAILING)
# KEEP ONLY LINES AND PAIRINGS (SET MIN TOI?)
# Load libraries
library(RSQLite)
library(dplyr)
## Load DB Tables
# Link to database
link <- "~/Documents/dryscrape data/dryscrape.sqlite"
newcon <- dbConnect(SQLite(), link)
start <- Sys.time()
# Read tables
roster <- dbReadTable(newcon, "roster")
team <- dbReadTable(newcon, "team")
goalie <- dbReadTable(newcon, "goalie")
player <- dbReadTable(newcon, "player")
combo <- dbReadTable(newcon, "combo")
## Aggregate Stats
# Roster
# One row per (player, season, season type); multi-valued fields (team, number,
# position) are collapsed into "/"-separated strings across the season.
sumroster <- group_by(roster, Full.Name, Season, Season.Type) %>%
summarise(Team = paste(unique(Team), collapse = "/"), Number = paste(unique(Number), collapse = "/"), Team.Num = paste(unique(Team.Num), collapse = "/"),
Position = paste(unique(Position), collapse = "/"), Last.Name = first(Last.Name), First.Name = first(First.Name)) %>%
data.frame()
# Team
# Games played per (team, season, season type); Code is the lookup key used in the
# match() inside summarise() below.
teamgp <- group_by(team, Team, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
mutate(Code = paste(Team, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
# Anything outside the listed skater counts is bucketed as "XvX"; "0v0" (shootout) is dropped below.
team$Strength.State[which(team$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Season totals per (team, season, venue, strength state, score category, season type).
sumteam <- filter(team, Strength.State != "0v0") %>% group_by(Team, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
summarise(GP = teamgp$GP[match(paste(first(Team), first(Season), first(Season.Type), sep = "."), teamgp$Code)],
TOI = sum(TOI), CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA), SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA),
xGF = sum(xGF), xGA = sum(xGA), ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA),
AGF = sum(AGF), AGA = sum(AGA), AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA), OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF),
DZF = sum(DZF), NZF = sum(NZF), FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA), GVA = sum(GVA), TKA = sum(TKA),
PENT = sum(PENT), PEND = sum(PEND), DISTF = sum(DISTF), DISTA = sum(DISTA)) %>% data.frame()
# Goalie
# Games played per (goalie, season, season type); Code is the lookup key for the match() below.
goaliegp <- group_by(goalie, Player, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
mutate(Code = paste(Player, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
goalie$Strength.State[which(goalie$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Season totals per (goalie, season, venue, strength state, score category, season type);
# mid-season trades collapse Team into a "/"-separated string.
sumgoalie <- filter(goalie, Strength.State != "0v0") %>% group_by(Player, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
summarise(GP = goaliegp$GP[match(paste(first(Player), first(Season), first(Season.Type), sep = "."), goaliegp$Code)],
Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA), SF = sum(SF), SA = sum(SA),
GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA), ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA),
AGF = sum(AGF), AGA = sum(AGA), AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA), OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF),
DZF = sum(DZF), NZF = sum(NZF), FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA), GVA = sum(GVA), TKA = sum(TKA),
PENT = sum(PENT), PEND = sum(PEND), DISTA = sum(DISTA), G = sum(G), A1 = sum(na.omit(A1)), A2 = sum(na.omit(A2)), SOA = sum(SOA), SOG = sum(SOG),
iPENT = sum(iPENT), iPEND = sum(na.omit(iPEND))) %>% data.frame()
# Player
# Games played per (player, season, season type); Code is the lookup key for the match() below.
playergp <- group_by(player, Player, Season, Season.Type) %>% summarise(GP = length(unique(Newcode))) %>% data.frame() %>%
mutate(Code = paste(Player, Season, Season.Type, sep = ".")) %>% data.frame()
# Group leftover strength states
player$Strength.State[which(player$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "5v3", "3v5", "3v3", "4v3", "3v4", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Skater season totals. "i"-prefixed columns are individual counts, "t"-prefixed are
# team-while-on-ice totals; the mutate() converts t-columns into "O" (off-ice) columns
# by subtracting the on-ice counts, then drops the t-columns.
# NOTE(review): Position lookup matches on Full.Name only (not season/season type) — confirm intended.
sumplayer <- filter(player, Strength.State != "0v0") %>% group_by(Player, Season, Venue, Strength.State, Score.Cat, Season.Type) %>%
summarise(GP = playergp$GP[match(paste(first(Player), first(Season), first(Season.Type), sep = "."), playergp$Code)],
Position = sumroster$Position[match(first(Player), sumroster$Full.Name)],
Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), CF = sum(CF), CA = sum(CA), iCF = sum(iCF), FF = sum(FF), FA = sum(FA), iFF = sum(iFF),
SF = sum(SF), SA = sum(SA), iSF = sum(iSF), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA), ixG = sum(ixG),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OTF = sum(OTF), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), iFOW = sum(iFOW), iFOL = sum(iFOL), HF = sum(HF), HA = sum(HA), iHF = sum(iHF), iHA = sum(iHA),
GVA = sum(GVA), TKA = sum(TKA), iGVA = sum(iGVA), iTKA = sum(iTKA), iBLK = sum(iBLK), PENT = sum(PENT), PEND = sum(PEND),
iDIST = sum(iDIST), G = sum(G), A1 = sum(na.omit(A1)), A2 = sum(na.omit(A2)), SOA = sum(SOA), SOG = sum(SOG), iPENT = sum(iPENT), iPEND = sum(na.omit(iPEND)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>% data.frame() %>%
select(-c(tCF:tNZS)) %>%
data.frame()
# Combo
# Group leftover strength states
combo$Strength.State[which(combo$Strength.State %in% c("5v5", "5v4", "4v5", "4v4", "3v3", "0v0") == FALSE)] <- "XvX" # EXCLUDE SHOOTOUT
# Forward lines: keep combos where all three skaters play a forward position (C/L/R)
# and the row is not a shootout, then aggregate per (line, season, strength, season
# type, venue). "t"-prefixed columns are team-while-on-ice totals; the mutate()
# converts them to "O" (off-ice) columns by subtracting the line's on-ice counts.
sumline <- filter(combo, grepl("C|L|R", as.character(P3.POS)) == TRUE & grepl("C|L|R", as.character(P2.POS)) == TRUE & grepl("C|L|R", as.character(P1.POS)) == TRUE & Strength.State != "0v0") %>%
group_by(Combo.Code, Season, Strength.State, Season.Type, Venue) %>%
summarise(Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI), P1 = first(P1), P1.POS = first(P1.POS),
P2 = first(P2), P2.POS = first(P2.POS), P3 = first(P3), P3.POS = first(P3.POS),
CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA),
SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA),
GVA = sum(GVA), TKA = sum(TKA), PENT = sum(PENT), PEND = sum(PEND),
P1.G = sum(P1.G), P1.A1 = sum(na.omit(P1.A1)), P1.A2 = sum(na.omit(P1.A2)),
P2.G = sum(P2.G), P2.A1 = sum(na.omit(P2.A1)), P2.A2 = sum(na.omit(P2.A2)),
P3.G = sum(P3.G), P3.A1 = sum(na.omit(P3.A1)), P3.A2 = sum(na.omit(P3.A2)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%  # BUG FIX: tDZS was sum(DZS), which made ODZS below always 0
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>%
select(-c(tCF:tMxGA)) %>%  # NOTE(review): unlike sumplayer, keeps tOZS/tDZS/tNZS (sumplayer drops through tNZS) — confirm intended
data.frame()
# Defense pairings: combos with no third skater (P3 == "X") and both remaining
# skaters at D, excluding shootout rows; aggregated per (pairing, season, strength,
# season type, venue). "t"-prefixed columns are team-while-on-ice totals; the
# mutate() converts them to "O" (off-ice) columns by subtracting on-ice counts.
sumpair <- filter(combo, as.character(P3) == "X" & grepl("D", as.character(P2.POS)) == TRUE & grepl("D", as.character(P1.POS)) == TRUE & Strength.State != "0v0") %>%
group_by(Combo.Code, Season, Strength.State, Season.Type, Venue) %>%
summarise(Team = paste(unique(Team), collapse = "/"), TOI = sum(TOI),
P1 = first(P1), P1.POS = first(P1.POS), P2 = first(P2), P2.POS = first(P2.POS),
CF = sum(CF), CA = sum(CA), FF = sum(FF), FA = sum(FA),
SF = sum(SF), SA = sum(SA), GF = sum(GF), GA = sum(GA), xGF = sum(xGF), xGA = sum(xGA),
ACF = sum(ACF), ACA = sum(ACA), AFF = sum(AFF), AFA = sum(AFA), ASF = sum(ASF), ASA = sum(ASA), AGF = sum(AGF), AGA = sum(AGA),
AxGF = sum(AxGF), AxGA = sum(AxGA), MCF = sum(MCF), MCA = sum(MCA), MFF = sum(MFF), MFA = sum(MFA),
MSF = sum(MSF), MSA = sum(MSA), MGF = sum(MGF), MGA = sum(MGA), MxGF = sum(MxGF), MxGA = sum(MxGA),
OZS = sum(OZS), DZS = sum(DZS), NZS = sum(NZS), OZF = sum(OZF), DZF = sum(DZF), NZF = sum(NZF),
FOW = sum(FOW), FOL = sum(FOL), HF = sum(HF), HA = sum(HA),
GVA = sum(GVA), TKA = sum(TKA), PENT = sum(PENT), PEND = sum(PEND),
P1.G = sum(P1.G), P1.A1 = sum(na.omit(P1.A1)), P1.A2 = sum(na.omit(P1.A2)),
P2.G = sum(P2.G), P2.A1 = sum(na.omit(P2.A1)), P2.A2 = sum(na.omit(P2.A2)),
tTOI = sum(tTOI), tCF = sum(tCF), tCA = sum(tCA), tFF = sum(tFF), tFA = sum(tFA), tSF = sum(tSF), tSA = sum(tSA),
tGF = sum(tGF), tGA = sum(tGA), txGF = sum(txGF), txGA = sum(txGA), tACF = sum(tACF), tACA = sum(tACA), tAFF = sum(tAFF), tAFA = sum(tAFA),
tASF = sum(tASF), tASA = sum(tASA), tAGF = sum(tAGF), tAGA = sum(tAGA), tAxGF = sum(tAxGF), tAxGA = sum(tAxGA), tMCF = sum(tMCF), tMCA = sum(tMCA),
tMFF = sum(tMFF), tMFA = sum(tMFA), tMSF = sum(tMSF), tMSA = sum(tMSA), tMGF = sum(tMGF), tMGA = sum(tMGA), tMxGF = sum(tMxGF), tMxGA = sum(tMxGA),
tOZS = sum(tOZS), tDZS = sum(tDZS), tNZS = sum(tNZS)) %>% data.frame() %>%  # BUG FIX: tDZS was sum(DZS), which made ODZS below always 0
mutate(OCF = tCF - CF, OCA = tCA - CA,
OFF = tFF - FF, OFA = tFA - FA,
OSF = tSF - SF, OSA = tSA - SA,
OGF = tGF - GF, OGA = tGA - GA,
OxGF = txGF - xGF, OxGA = txGA - xGA,
OACF = tACF - ACF, OACA = tACA - ACA,
OAFF = tAFF - AFF, OAFA = tAFA - AFA,
OASF = tASF - ASF, OASA = tASA - ASA,
OAGF = tAGF - AGF, OAGA = tAGA - AGA,
OAxGF = tAxGF - AxGF, OAxGA = tAxGA - AxGA,
OMCF = tMCF - MCF, OMCA = tMCA - MCA,
OMFF = tMFF - MFF, OMFA = tMFA - MFA,
OMSF = tMSF - MSF, OMSA = tMSA - MSA,
OMGF = tMGF - MGF, OMGA = tMGA - MGA,
OMxGF = tMxGF - MxGF, OMxGA = tMxGA - MxGA,
OOZS = tOZS - OZS, ODZS = tDZS - DZS, ONZS = tNZS - NZS) %>%
select(-c(tCF:tMxGA)) %>%  # NOTE(review): keeps tOZS/tDZS/tNZS (sumplayer drops through tNZS) — confirm intended
data.frame()
# ASSIST NETWORK
# Report elapsed wall-clock time for the aggregation pass
end <- Sys.time()
print(end - start)
################################################################################################################################################################################################################
################################################################################################################################################################################################################
################################################################################################################################################################################################################
## Write to Dropbox
# Each summary table is serialized as an .Rda; the CSV writers are kept commented out.
# Roster
# write.csv(sumroster, file = "~/Dropbox/rostertest.csv")
save(sumroster, file = "~/Dropbox/rostertest.Rda")
# Team
# write.csv(sumteam, file = "~/Dropbox/teamtest.csv")
save(sumteam, file = "~/Dropbox/teamtest.Rda")
# Goalie
# write.csv(sumgoalie, file = "~/Dropbox/goalietest.csv")
save(sumgoalie, file = "~/Dropbox/goalietest.Rda")
# Player
# write.csv(sumplayer, file = "~/Dropbox/playertest.csv")
save(sumplayer, file = "~/Dropbox/playertest.Rda")
# Combo
# write.csv(sumcombo, file = "~/Dropbox/combotest.csv")
save(sumpair, file = "~/Dropbox/pairtest.Rda")
save(sumline, file = "~/Dropbox/linetest.Rda")
|
### Implementation of by-cases for vegan 2.2 versions of
### anova.cca. These are all internal functions that are not intended
### to be called by users in normal sessions, but they should be
### called from anova.cca (2.2). Therefore the user interface is rigid
### and input is not checked. The 'permutations' should be a
### permutation matrix.
### by = terms builds models as a sequence of adding terms and submits
### this to anova.ccalist
### Arguments: 'object' a fitted cca/rda, 'permutations' a permutation
### matrix, 'model' the permutation model passed on, 'parallel' the
### parallel-processing spec. Returns a classed "anova" data frame with
### one row per term plus a Residual row.
`anova.ccabyterm` <-
function(object, permutations, model, parallel)
{
## We need term labels but without Condition() terms
trms <- terms(object)
trmlab <- attr(trms, "term.labels")
trmlab <- trmlab[trmlab %in% attr(terms(object$terminfo),
"term.labels")]
ntrm <- length(trmlab)
## m0: null model with every constrained term removed; then refit
## adding one term at a time so anova.ccalist can compare the sequence
m0 <- update(object, paste(".~.-", paste(trmlab, collapse="-")))
mods <- list(m0)
for(i in seq_along(trmlab)) {
fla <- paste(". ~ . + ", trmlab[i])
mods[[i+1]] <- update(mods[[i]], fla)
}
## The result
sol <- anova.ccalist(mods, permutations = permutations,
model = model, parallel = parallel)
## Reformat: shift the sequential-comparison rows into per-term rows
## and append the residual Df / inertia from the full model
out <- data.frame(c(sol[-1,3], sol[ntrm+1,1]),
c(sol[-1,4], sol[ntrm+1,2]),
c(sol[-1,5], NA),
c(sol[-1,6], NA))
isRDA <- inherits(object, "rda")
colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
"F", "Pr(>F)")
rownames(out) <- c(trmlab, "Residual")
head <- paste0("Permutation test for ", object$method, " under ",
model, " model\n",
"Terms added sequentially (first to last)\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
class(out) <- c("anova","data.frame")
out
}
## by = margin: this is not a anova.ccalist case, but we omit each
## term in turn and compare against the complete model.
## 'scope' may name the marginal terms to test; defaults to drop.scope().
## Returns a classed "anova" data frame with one row per marginal term
## plus a Residual row.
`anova.ccabymargin` <-
function(object, permutations, scope, ...)
{
nperm <- nrow(permutations)
## Refuse to handle models with missing data
if (!is.null(object$na.action))
stop("by = 'margin' models cannot handle missing data")
## We need term labels but without Condition() terms
if (!is.null(scope) && is.character(scope))
trms <- scope
else
trms <- drop.scope(object)
trmlab <- trms[trms %in% attr(terms(object$terminfo),
"term.labels")]
if(length(trmlab) == 0)
stop("the scope was empty: no available marginal terms")
## baseline: all terms
big <- permutest(object, permutations, ...)
dfbig <- big$df[2]
chibig <- big$chi[2]
## per-permutation residual scale of the full model, used as the
## denominator of the simulated F-values below
scale <- big$den/dfbig
## Collect all marginal models. This differs from old version
## (vegan 2.0) where other but 'nm' were partialled out within
## Condition(). Now we only fit the model without 'nm' and compare
## the difference against the complete model.
mods <- lapply(trmlab, function(nm, ...)
permutest(update(object, paste(".~.-", nm)),
permutations, ...), ...)
## Change in df
Df <- sapply(mods, function(x) x$df[2]) - dfbig
## F of change
Chisq <- sapply(mods, function(x) x$chi[2]) - chibig
Fstat <- (Chisq/Df)/(chibig/dfbig)
## Simulated F-values
Fval <- sapply(mods, function(x) x$num)
## Had we an empty model we need to clone the denominator
if (length(Fval) == 1)
Fval <- matrix(Fval, nrow=nperm)
Fval <- sweep(-Fval, 1, big$num, "+")
Fval <- sweep(Fval, 2, Df, "/")
Fval <- sweep(Fval, 1, scale, "/")
## Simulated P-values
Pval <- (colSums(sweep(Fval, 2, Fstat, ">=")) + 1)/(nperm + 1)
## Collect results to anova data.frame
out <- data.frame(c(Df, dfbig), c(Chisq, chibig),
c(Fstat, NA), c(Pval, NA))
isRDA <- inherits(object, "rda")
colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
"F", "Pr(>F)")
rownames(out) <- c(trmlab, "Residual")
head <- paste0("Permutation test for ", object$method, " under ",
mods[[1]]$model, " model\n",
"Marginal effects of terms\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
class(out) <- c("anova", "data.frame")
out
}
### Marginal test for axes
### Tests each constrained axis by partialling out all other axes'
### LC scores via Condition(). Testing stops early once an axis's
### P-value exceeds 'cutoff' (later axes are reported as NA).
`anova.ccabyaxis` <-
function(object, permutations, model, parallel, cutoff = 1)
{
nperm <- nrow(permutations)
## Observed F-values and Df
eig <- object$CCA$eig
resdf <- nobs(object) - length(eig) - max(object$pCCA$rank, 0) - 1
Fstat <- eig/object$CA$tot.chi*resdf
Df <- rep(1, length(eig))
## Marginal P-values
LC <- object$CCA$u
## missing values? pad LC scores back to the original row count
if (!is.null(object$na.action))
LC <- napredict(structure(object$na.action,
class="exclude"), LC)
## subset? expand LC to full length and refit on the subset
if (!is.null(object$subset)) {
tmp <- matrix(NA, nrow=length(object$subset),
ncol = ncol(LC))
tmp[object$subset,] <- LC
LC <- tmp
object <- update(object, subset = object$subset)
}
LC <- as.data.frame(LC)
fla <- reformulate(names(LC))
Pvals <- rep(NA, length(eig))
## make update()/permutest() see the local LC columns when the
## formula is re-evaluated
environment(object$terms) <- environment()
for (i in seq_along(eig)) {
## condition on every axis except the one being tested
part <- paste("~ . +Condition(",
paste(names(LC)[-i], collapse = "+"), ")")
upfla <- update(fla, part)
## only one axis, and cannot partial out?
if (length(eig) == 1)
mod <- permutest(object, permutations, model = model,
parallel = parallel)
else
mod <-
permutest(update(object, upfla, data = LC),
permutations, model = model,
parallel = parallel)
Pvals[i] <- (sum(mod$F.perm >= mod$F.0) + 1)/(nperm+1)
## early exit: axes after a non-significant one are left NA
if (Pvals[i] > cutoff)
break
}
out <- data.frame(c(Df, resdf), c(eig, object$CA$tot.chi),
c(Fstat, NA), c(Pvals,NA))
rownames(out) <- c(names(eig), "Residual")
isRDA <- inherits(object, "rda")
colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
"F", "Pr(>F)")
head <- paste0("Permutation test for ", object$method, " under ",
model, " model\n",
"Marginal tests for axes\n",
howHead(attr(permutations, "control")))
mod <- paste("Model:", c(object$call))
attr(out, "heading") <- c(head, mod)
class(out) <- c("anova", "data.frame")
out
}
| /R/anova.ccabyterm.R | no_license | guiblanchet/vegan | R | false | false | 6,799 | r | ### Implementation of by-cases for vegan 2.2 versions of
### anova.cca. These are all internal functions that are not intended
### to be called by users in normal sessions, but they should be
### called from anova.cca (2.2). Therefore the user interface is rigid
### and input is not checked. The 'permutations' should be a
### permutation matrix.
### by = terms builds models as a sequence of adding terms and submits
### this to anova.ccalist
`anova.ccabyterm` <-
    function(object, permutations, model, parallel)
{
    ## Internal workhorse of anova.cca(..., by = "terms"): builds the
    ## sequence of models obtained by adding one constraining term at a
    ## time and submits that list to anova.ccalist() for sequential
    ## permutation tests.  'permutations' is a permutation matrix.
    ## We need term labels but without Condition() terms
    trms <- terms(object)
    trmlab <- attr(trms, "term.labels")
    trmlab <- trmlab[trmlab %in% attr(terms(object$terminfo),
                                      "term.labels")]
    ntrm <- length(trmlab)
    ## start from a model with no constraining terms, then add terms
    ## back one by one in their original order
    m0 <- update(object, paste(".~.-", paste(trmlab, collapse="-")))
    mods <- list(m0)
    for(i in seq_along(trmlab)) {
        fla <- paste(". ~ . + ", trmlab[i])
        mods[[i+1]] <- update(mods[[i]], fla)
    }
    ## The result
    sol <- anova.ccalist(mods, permutations = permutations,
                         model = model, parallel = parallel)
    ## Reformat: take Df, variation, F and P for each added term, and
    ## append the residual row of the full model
    out <- data.frame(c(sol[-1,3], sol[ntrm+1,1]),
                      c(sol[-1,4], sol[ntrm+1,2]),
                      c(sol[-1,5], NA),
                      c(sol[-1,6], NA))
    isRDA <- inherits(object, "rda")
    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
                       "F", "Pr(>F)")
    rownames(out) <- c(trmlab, "Residual")
    head <- paste0("Permutation test for ", object$method, " under ",
                   model, " model\n",
                   "Terms added sequentially (first to last)\n",
                   howHead(attr(permutations, "control")))
    mod <- paste("Model:", c(object$call))
    attr(out, "heading") <- c(head, mod)
    class(out) <- c("anova","data.frame")
    out
}
## by = margin: this is not a anova.ccalist case, but we omit each
## term in turn and compare against the complete model.
`anova.ccabymargin` <-
    function(object, permutations, scope, ...)
{
    ## Internal workhorse of anova.cca(..., by = "margin"): tests the
    ## marginal effect of each term by refitting the model without that
    ## term and comparing against the complete model under permutation.
    ## 'permutations' is a permutation matrix; 'scope' (optional)
    ## restricts which terms are tested.
    nperm <- nrow(permutations)
    ## Refuse to handle models with missing data
    if (!is.null(object$na.action))
        stop("by = 'margin' models cannot handle missing data")
    ## We need term labels but without Condition() terms
    if (!is.null(scope) && is.character(scope))
        trms <- scope
    else
        trms <- drop.scope(object)
    trmlab <- trms[trms %in% attr(terms(object$terminfo),
                                  "term.labels")]
    if(length(trmlab) == 0)
        stop("the scope was empty: no available marginal terms")
    ## baseline: all terms
    big <- permutest(object, permutations, ...)
    dfbig <- big$df[2]
    chibig <- big$chi[2]
    scale <- big$den/dfbig
    ## Collect all marginal models. This differs from old version
    ## (vegan 2.0) where other but 'nm' were partialled out within
    ## Condition(). Now we only fit the model without 'nm' and compare
    ## the difference against the complete model.
    mods <- lapply(trmlab, function(nm, ...)
        permutest(update(object, paste(".~.-", nm)),
                  permutations, ...), ...)
    ## Change in df when each term is dropped
    Df <- sapply(mods, function(x) x$df[2]) - dfbig
    ## F of change
    Chisq <- sapply(mods, function(x) x$chi[2]) - chibig
    Fstat <- (Chisq/Df)/(chibig/dfbig)
    ## Simulated F-values
    Fval <- sapply(mods, function(x) x$num)
    ## with a single term sapply() returns a vector: reshape it to a
    ## one-column matrix so the sweep()s below work
    if (length(Fval) == 1)
        Fval <- matrix(Fval, nrow=nperm)
    Fval <- sweep(-Fval, 1, big$num, "+")
    Fval <- sweep(Fval, 2, Df, "/")
    Fval <- sweep(Fval, 1, scale, "/")
    ## Simulated P-values
    Pval <- (colSums(sweep(Fval, 2, Fstat, ">=")) + 1)/(nperm + 1)
    ## Collect results to anova data.frame
    out <- data.frame(c(Df, dfbig), c(Chisq, chibig),
                      c(Fstat, NA), c(Pval, NA))
    isRDA <- inherits(object, "rda")
    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
                       "F", "Pr(>F)")
    rownames(out) <- c(trmlab, "Residual")
    head <- paste0("Permutation test for ", object$method, " under ",
                   mods[[1]]$model, " model\n",
                   "Marginal effects of terms\n",
                   howHead(attr(permutations, "control")))
    mod <- paste("Model:", c(object$call))
    attr(out, "heading") <- c(head, mod)
    class(out) <- c("anova", "data.frame")
    out
}
### Marginal test for axes
`anova.ccabyaxis` <-
    function(object, permutations, model, parallel, cutoff = 1)
{
    ## Internal workhorse of anova.cca(..., by = "axis"): tests each
    ## constrained axis marginally by partialling out all other axes
    ## (as Condition() terms) and permuting.  'permutations' is a
    ## permutation matrix; 'cutoff' stops testing later axes once a
    ## P-value exceeds it (remaining P-values stay NA).  Not intended
    ## to be called directly by users.
    nperm <- nrow(permutations)
    ## Observed F-values and Df: each axis has one df; residual df is
    ## taken from the fitted model (constrained and partial ranks removed)
    eig <- object$CCA$eig
    resdf <- nobs(object) - length(eig) - max(object$pCCA$rank, 0) - 1
    Fstat <- eig/object$CA$tot.chi*resdf
    Df <- rep(1, length(eig))
    ## Marginal P-values: the linear combination scores (LC) serve as
    ## conditioning variables for the axes not under test
    LC <- object$CCA$u
    ## missing values? pad LC back to the length of the original data
    if (!is.null(object$na.action))
        LC <- napredict(structure(object$na.action,
                                  class="exclude"), LC)
    ## subset? expand LC to the full length and refit on the subset
    if (!is.null(object$subset)) {
        tmp <- matrix(NA, nrow=length(object$subset),
                      ncol = ncol(LC))
        tmp[object$subset,] <- LC
        LC <- tmp
        object <- update(object, subset = object$subset)
    }
    LC <- as.data.frame(LC)
    fla <- reformulate(names(LC))
    Pvals <- rep(NA, length(eig))
    ## make sure update() below can see LC when evaluating the formula
    environment(object$terms) <- environment()
    for (i in seq_along(eig)) {
        ## condition on every axis except the i-th
        part <- paste("~ . +Condition(",
                      paste(names(LC)[-i], collapse = "+"), ")")
        upfla <- update(fla, part)
        ## only one axis, and cannot partial out?
        if (length(eig) == 1)
            mod <- permutest(object, permutations, model = model,
                             parallel = parallel)
        else
            mod <-
                permutest(update(object, upfla, data = LC),
                          permutations, model = model,
                          parallel = parallel)
        Pvals[i] <- (sum(mod$F.perm >= mod$F.0) + 1)/(nperm+1)
        ## stop early: once an axis exceeds 'cutoff', skip the rest
        if (Pvals[i] > cutoff)
            break
    }
    ## Collect results to an anova-style data frame
    out <- data.frame(c(Df, resdf), c(eig, object$CA$tot.chi),
                      c(Fstat, NA), c(Pvals,NA))
    rownames(out) <- c(names(eig), "Residual")
    isRDA <- inherits(object, "rda")
    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
                       "F", "Pr(>F)")
    head <- paste0("Permutation test for ", object$method, " under ",
                   model, " model\n",
                   "Marginal tests for axes\n",
                   howHead(attr(permutations, "control")))
    mod <- paste("Model:", c(object$call))
    attr(out, "heading") <- c(head, mod)
    class(out) <- c("anova", "data.frame")
    out
}
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.1278173040278e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) | /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609866813-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,199 | r | testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.1278173040278e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seq_scan_sim.R
\name{seq_scan_sim}
\alias{seq_scan_sim}
\title{Perform scan test on simulated data sequentially}
\usage{
seq_scan_sim(
nsim = 1,
nn,
ty,
ex,
type = "poisson",
ein = NULL,
eout = NULL,
tpop = NULL,
popin = NULL,
popout = NULL,
cl = NULL,
simdist = "multinomial",
pop = NULL,
min.cases = 0,
ldup = NULL,
lseq_zones
)
}
\arguments{
\item{nsim}{A positive integer indicating the number of
simulations to perform.}
\item{nn}{A list of nearest neighbors produced by \code{\link{nnpop}}.}
\item{ty}{The total number of cases in the study area.}
\item{ex}{The expected number of cases for each region.
The default is calculated under the constant risk
hypothesis.}
\item{type}{The type of scan statistic to compute. The
default is \code{"poisson"}. The other choice
is \code{"binomial"}.}
\item{ein}{The expected number of cases in the zone.
Conventionally, this is the estimated overall disease
risk across the study area, multiplied by the total
population size of the zone.}
\item{eout}{The expected number of cases outside the
zone. This should be \code{ty - ein} and is computed
automatically if not provided.}
\item{tpop}{The total population in the study area.}
\item{popin}{The total population in the zone.}
\item{popout}{The population outside the zone. This
should be \code{tpop - popin} and is computed
automatically if not provided.}
\item{cl}{
A cluster object created by \code{\link{makeCluster}},
or an integer to indicate number of child-processes
(integer values are ignored on Windows) for parallel evaluations
(see Details on performance).
It can also be \code{"future"} to use a future backend (see Details),
\code{NULL} (default) refers to sequential evaluation.
}
\item{simdist}{Character string indicating the simulation
distribution. The default is \code{"multinomial"}, which
conditions on the total number of cases observed. The
other options are \code{"poisson"} and \code{"binomial"}}
\item{pop}{The population size associated with each
region.}
\item{min.cases}{The minimum number of cases required for
a cluster. The default is 0.}
\item{ldup}{A logical vector indicating positions of duplicated zones. Not
intended for user use.}
\item{lseq_zones}{A list of logical vectors specifying the sequence of
relevant zones based on ubpop constraints}
}
\value{
A list with the maximum statistic for each population upperbound for
each simulated data set. Each element will have a vector of maximums for
each simulated data set corresponding to the sequence of ubpop values. The
list will have \code{nsim} elements.
}
\description{
\code{seq_scan_sim} efficiently performs \code{\link{scan.test}} on a
simulated data set. The function is meant to be used internally by the
\code{\link{optimal_ubpop}} function in the smerc package.
}
\keyword{internal}
| /man/seq_scan_sim.Rd | no_license | cran/smerc | R | false | true | 3,016 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seq_scan_sim.R
\name{seq_scan_sim}
\alias{seq_scan_sim}
\title{Perform scan test on simulated data sequentially}
\usage{
seq_scan_sim(
nsim = 1,
nn,
ty,
ex,
type = "poisson",
ein = NULL,
eout = NULL,
tpop = NULL,
popin = NULL,
popout = NULL,
cl = NULL,
simdist = "multinomial",
pop = NULL,
min.cases = 0,
ldup = NULL,
lseq_zones
)
}
\arguments{
\item{nsim}{A positive integer indicating the number of
simulations to perform.}
\item{nn}{A list of nearest neighbors produced by \code{\link{nnpop}}.}
\item{ty}{The total number of cases in the study area.}
\item{ex}{The expected number of cases for each region.
The default is calculated under the constant risk
hypothesis.}
\item{type}{The type of scan statistic to compute. The
default is \code{"poisson"}. The other choice
is \code{"binomial"}.}
\item{ein}{The expected number of cases in the zone.
Conventionally, this is the estimated overall disease
risk across the study area, multiplied by the total
population size of the zone.}
\item{eout}{The expected number of cases outside the
zone. This should be \code{ty - ein} and is computed
automatically if not provided.}
\item{tpop}{The total population in the study area.}
\item{popin}{The total population in the zone.}
\item{popout}{The population outside the zone. This
should be \code{tpop - popin} and is computed
automatically if not provided.}
\item{cl}{
A cluster object created by \code{\link{makeCluster}},
or an integer to indicate number of child-processes
(integer values are ignored on Windows) for parallel evaluations
(see Details on performance).
It can also be \code{"future"} to use a future backend (see Details),
\code{NULL} (default) refers to sequential evaluation.
}
\item{simdist}{Character string indicating the simulation
distribution. The default is \code{"multinomial"}, which
conditions on the total number of cases observed. The
other options are \code{"poisson"} and \code{"binomial"}}
\item{pop}{The population size associated with each
region.}
\item{min.cases}{The minimum number of cases required for
a cluster. The default is 0.}
\item{ldup}{A logical vector indicating positions of duplicated zones. Not
intended for user use.}
\item{lseq_zones}{A list of logical vectors specifying the sequence of
relevant zones based on ubpop constraints}
}
\value{
A list with the maximum statistic for each population upperbound for
each simulated data set. Each element will have a vector of maximums for
each simulated data set corresponding to the sequence of ubpop values. The
list will have \code{nsim} elements.
}
\description{
\code{seq_scan_sim} efficiently performs \code{\link{scan.test}} on a
simulated data set. The function is meant to be used internally by the
\code{\link{optimal_ubpop}} function in the smerc package.
}
\keyword{internal}
|
/Formação Cientista de Dados/Atividades/Estatística II em R/7 - Regressão Linear Simples PARTE II.R | no_license | lucianofbn/Data-Scientist | R | false | false | 379 | r | ||
#' msrat: Metrics-Based Software Reliability Assessment Tool
#'
#' This package provides estimation programs for metrics-based software
#' reliability growth models with logistic regression for dynamic metrics
#' and Poisson regression for static metrics.
#'
#' @docType package
#' @name msrat
#' @import R6 Rsrat
#' @importFrom stats nobs model.frame model.response model.matrix gaussian
#' @importFrom Matrix Matrix
#' @importFrom Rcpp sourceCpp
#' @useDynLib msrat
# NULL is the conventional roxygen2 target for package-level documentation.
NULL
| /R/package.R | permissive | SwReliab/msrat | R | false | false | 473 | r | #' msrat: Metrics-Based Software Reliability Assessment Tool
#'
#' This package provides estimation programs for metrics-based software
#' reliability growth models with logistic regression for dynamic metrics
#' and Poisson regression for static metrics.
#'
#' @docType package
#' @name msrat
#' @import R6 Rsrat
#' @importFrom stats nobs model.frame model.response model.matrix gaussian
#' @importFrom Matrix Matrix
#' @importFrom Rcpp sourceCpp
#' @useDynLib msrat
# NULL is the conventional roxygen2 target for package-level documentation.
NULL
|
MMRtime <- function(x, d, age) {
    ## Estimate the mean and median residual lifetime beyond a given
    ## age from the survival curve produced by WKM() (event times and
    ## probability jumps).
    ##
    ## x, d : observed times and (presumably) event indicators passed
    ##        straight to WKM() -- confirm against WKM()'s interface.
    ## age  : time point beyond which residual life is evaluated.
    ## Returns list(MeanResidual=, MedianResidual=).
    km <- WKM(x = x, d = d)
    times <- km$times
    jumps <- km$jump
    if (age >= times[length(times)]) stop("age too large")
    if (age < times[1]) warning("age smaller than first event time")
    ## keep only the probability mass at or after 'age'
    jumps[times < age] <- 0
    surv.age <- sum(jumps)
    ## mean residual life: (t - age) averaged over the remaining mass,
    ## normalised by S(age)
    mean.res <- sum((times - age) * jumps) / surv.age
    ## median residual life: first event time at which the cumulative
    ## mass beyond 'age' reaches half of S(age)
    half.mass <- surv.age / 2
    cum.mass <- cumsum(jumps)
    idx <- sum(cum.mass < half.mass)
    med.time <- times[idx + 1]
    list(MeanResidual = mean.res, MedianResidual = med.time - age)
}
| /R/MMRtime.R | no_license | cran/emplik | R | false | false | 534 | r | MMRtime <- function(x, d, age) {
#### estimate Mean/Median Residual lifetime over age.
# Survival-curve event times and probability jumps from the weighted
# Kaplan-Meier estimator WKM().
temp <- WKM( x=x , d=d )
tivec <- temp$times
pivec <- temp$jump
if( age >= tivec[length(tivec)] ) stop("age too large")
if( age < tivec[1] ) warning("age smaller than first event time")
# zero out the probability mass before 'age'; what remains sums to S(age)
pivec[ tivec < age ] <- 0
Sage <- sum( pivec )
# mean residual life: (t - age) weighted by the remaining mass / S(age)
fenzi <- sum( (tivec - age)*pivec )
MRtime <- fenzi/Sage
# median residual life: first event time where the cumulative mass
# beyond 'age' reaches half of S(age)
Ptheta <- Sage/2
Cprob <- cumsum(pivec)
posi <- sum(Cprob < Ptheta)
theta <- tivec[posi+1]
list(MeanResidual = MRtime, MedianResidual = theta - age)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TranscriptsRelated.R
\name{remove.verylong.isoform.gff}
\alias{remove.verylong.isoform.gff}
\title{Remove isoforms with extremely long intervals (isoform.End - isoform.Start)}
\usage{
remove.verylong.isoform.gff(gff, interval.length = 1500000)
}
\arguments{
\item{gff}{gff file path}
\item{interval.length}{the maximum length between isoform start and isoform end}
}
\value{
}
\description{
Remove isoforms with an extremely long interval (isoform.End - isoform.Start)
}
\examples{
remove.verylong.isoform.gff("/data/home2/Zhongxu/work/3.filter.gtf")
}
| /man/remove.verylong.isoform.gff.Rd | permissive | siegmundwang/loonR | R | false | true | 625 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TranscriptsRelated.R
\name{remove.verylong.isoform.gff}
\alias{remove.verylong.isoform.gff}
\title{Remove isoforms with extremely long intervals (isoform.End - isoform.Start)}
\usage{
remove.verylong.isoform.gff(gff, interval.length = 1500000)
}
\arguments{
\item{gff}{gff file path}
\item{interval.length}{the maximum length between isoform start and isoform end}
}
\value{
}
\description{
Remove isforms with extremly long interval (isoform.End - isoform.Start)
}
\examples{
remove.verylong.isoform.gff("/data/home2/Zhongxu/work/3.filter.gtf")
}
|
# Start the plumber web service: build the API from server/api.R, serve
# the client's static assets at the site root, disable the auto-generated
# API docs UI, and listen on all interfaces on port 80.
library(plumber)
plumb("./server/api.R") %>%
  pr_static("/", "../client/public") %>%
  pr_set_docs(FALSE) %>%
  pr_run(host="0.0.0.0", port=80)
| /runApp.R | permissive | BigDataGrapes-EU/d5.3-gacovi | R | false | false | 149 | r | library(plumber)
# Build the API from server/api.R, mount the client's static files at the
# site root, turn off the auto-generated docs, and serve on port 80.
plumb("./server/api.R") %>%
  pr_static("/", "../client/public") %>%
  pr_set_docs(FALSE) %>%
  pr_run(host="0.0.0.0", port=80)
|
#' Plot method for \code{generalize} objects
#'
#' Forwards to the plot method of the object's stored \code{samples}
#' component.
#'
#' @param x A \code{generalize} object.
#' @param ... Further arguments passed on to \code{plot}.
#'@export
#' @importFrom graphics plot
plot.generalize <- function(x, ...){
  plot(x$samples, ...)
}
#' Histogram method for \code{generalize} objects
#'
#' Row-binds the simulated bounds stored in \code{x$samples} into one
#' data frame and draws a histogram of the column named by \code{var}.
#'
#' @param x A \code{generalize} object.
#' @param var Name of the column of the simulated bounds to plot.
#' @param xlab,breaks,main,yaxt,ylab,col Passed on to
#'   \code{\link[graphics]{hist}}.
#' @param lims Optional x-axis limits; defaults to the range of the
#'   plotted variable.
#' @param ... Further arguments passed on to \code{hist}.
#'@export
#'@importFrom graphics hist
hist.generalize <- function(x, var = "P11s",
                            xlab = var,
                            breaks = 50,
                            main = "",
                            yaxt = "n",
                            ylab = "",
                            col = "lightgray",
                            lims = NULL,
                            ...){
  # stack the per-draw simulated bounds into one data frame
  sim.bounds <- do.call("rbind", x$samples)
  sim.bounds <- as.data.frame(sim.bounds)
  if(is.null(lims)){
    lims <- range(sim.bounds[[var]])
  }
  hist(sim.bounds[[var]],
       breaks = breaks,
       xlim = lims,
       yaxt = yaxt,
       xlab = xlab,
       ylab = ylab,
       main = main,
       col = col,
       ...)
}
| /R/plot.R | no_license | carloscinelli/generalizing | R | false | false | 845 | r | #'@export
#' @importFrom graphics plot
plot.generalize <- function(x, ...){
  # delegate to the plot method of the stored samples component
  plot(x$samples, ...)
}
#'@export
#'@importFrom graphics hist
hist.generalize <- function(x, var = "P11s",
                            xlab = var,
                            breaks = 50,
                            main = "",
                            yaxt = "n",
                            ylab = "",
                            col = "lightgray",
                            lims = NULL,
                            ...){
  # stack the per-draw simulated bounds and histogram the chosen column
  sim.bounds <- do.call("rbind", x$samples)
  sim.bounds <- as.data.frame(sim.bounds)
  if(is.null(lims)){
    lims <- range(sim.bounds[[var]])
  }
  hist(sim.bounds[[var]],
       breaks = breaks,
       xlim = lims,
       yaxt = yaxt,
       xlab = xlab,
       ylab = ylab,
       main = main,
       col = col,
       ...)
}
|
# K-Nearest Neighbour classifier
# Build a KNN model to classify the zoo animals into their 'type' categories.
# Load the packages
library(caret)
library(gmodels)
library(class)
# Import the data
zoo <- read.csv("E:/Data science Excelr/Assigments/KNN/Zoo/Zoo.csv")
attach(zoo)
View(zoo)
summary(zoo)# summary statistics of the dataset
# All predictors are already numeric/binary, so no further EDA is performed
str(zoo)
# The 'type' column is the class label to predict, so convert it to a factor
type <- as.factor(zoo$type)
str(zoo)
# 'type' is now a factor with 7 levels
# Normalize the data
# Helper function for min-max normalization (maps each column to [0, 1])
normalise <- function(x)
{
  return((x - min(x))/(max(x) - min(x)))
}
# Apply the function to all predictor columns (column 17 is the label)
zoo_n <- as.data.frame(lapply(zoo[-17], normalise))
zoo_norm <- cbind(zoo_n,type)
str(zoo_norm)
# Create a data partition defining training and testing sets:
# 60% of the data (stratified random sample) for training, the remaining 40% for testing
part <- createDataPartition(zoo_norm$type, p=.60, list = F)
training <- zoo_norm[part,]
testing <- zoo_norm[-part,]
str(training)
# Loop over candidate k values to look for the optimum k
i=1
k.optm=1
for (i in 1:20) {
  knn.mod <-knn(train = training, test = testing,cl = training[,17],k=i)
  k.optm[i] <- 100*sum(training[,17] == knn.mod)/NROW(training[,17])
  k=i
  cat(k,"=",k.optm[i],'\n')
}
plot(k.optm,type = "b")
# The plot shows little difference between k values; explore a few anyway
# Build the KNN classifier model for selected k values
# For k=1
knn1 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=1)
CrossTable(testing$type,knn1, prop.r = F, prop.c = F, prop.chisq = F)
tab1 <- table(testing$type, knn1)
Acc1 <- round((sum(diag(tab1))/sum(tab1))*100, digits = 2)
Acc1
# For k=1 the accuracy is 94.87%
# For k=3
knn3 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=3)
CrossTable(testing$type,knn3, prop.r = F, prop.c = F, prop.chisq = F)
tab3 <- table(testing$type, knn3)
Acc3 <- round((sum(diag(tab3))/sum(tab3))*100, digits = 2)
Acc3
# For k=3 the accuracy is 92.31%
# For k=5
knn5 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=5)
CrossTable(testing$type,knn5, prop.r = F, prop.c = F, prop.chisq = F)
tab5 <- table(testing$type, knn5)
Acc5 <- round((sum(diag(tab5))/sum(tab5))*100, digits = 2)
Acc5
# For k=5 the accuracy is 89.74%
# For k=7
knn7 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=7)
CrossTable(testing$type,knn7, prop.r = F, prop.c = F, prop.chisq = F)
tab7 <- table(testing$type, knn7)
Acc7 <- round((sum(diag(tab7))/sum(tab7))*100, digits = 2)
Acc7
# For k=7 the accuracy is 87.14%
# For k=21
knn21 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=21)
CrossTable(testing$type,knn21, prop.r = F, prop.c = F, prop.chisq = F)
tab21 <- table(testing$type, knn21)
Acc21 <- round((sum(diag(tab21))/sum(tab21))*100, digits = 2)
Acc21
# For k=21 the accuracy is 79.49%
# k=1 gave the best accuracy; accuracy tends to decrease as k increases
# Repeat with standardized data points using the scale() function
zoo_scale <- as.data.frame(scale(zoo[,-17]))
type <- as.factor(zoo$type)
zoo_scale_data <- cbind(zoo_scale, type)
str(zoo_scale_data)
# Create a data partition defining training and testing sets:
# 60% of the data for training, the remaining 40% for testing
# NOTE(review): the lines below partition zoo_norm, not zoo_scale_data --
# the "scaled" models that follow are actually fit on the min-max
# normalized data; confirm whether zoo_scale_data was intended here.
part1 <- createDataPartition(zoo_norm$type, p=.60, list = F)
train_scaled <- zoo_norm[part1,]
test_scaled <- zoo_norm[-part1,]
j=1
k.optm1=1
for (j in 1:30) {
  knn.mod1<-knn(train = train_scaled, test = test_scaled,cl = train_scaled[,17],k=j)
  k.optm1[j] <- 100*sum(train_scaled[,17] == knn.mod1)/NROW(train_scaled[,17])
  k=j
  cat(k,"=",k.optm1[j],'\n')
}
plot(k.optm1,type = "b")
# Build the KNN classifier model for the "scaled" values
# For k=1
knn1scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=1) #cl is the vector of true classifications
# Evaluate the accuracy of the model
CrossTable(test_scaled$type,knn1scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale1 <- table(test_scaled$type,knn1scale)
Acc_Scale1 <- round(sum(diag(tabscale1))/sum(tabscale1)*100, digits = 2) # 'digits' sets the number of decimal places
Acc_Scale1
# For k=1 the accuracy is 97.44%
# For k=11
knn11scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=11) #cl is the vector of true classifications
# Evaluate the accuracy of the model
CrossTable(test_scaled$type,knn11scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale11 <- table(test_scaled$type,knn11scale)
Acc_Scale11 <- round(sum(diag(tabscale11))/sum(tabscale11)*100, digits = 2) # 'digits' sets the number of decimal places
Acc_Scale11 #92.05%
# For k=25
knn25scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=25) #cl is the vector of true classifications
# Evaluate the accuracy of the model
CrossTable(test_scaled$type,knn25scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale25 <- table(test_scaled$type,knn25scale)
Acc_Scale25 <- round(sum(diag(tabscale25))/sum(tabscale25)*100, digits = 2) # 'digits' sets the number of decimal places
Acc_Scale25 #69.23 %
# For k=30
knn30scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=30) #cl is the vector of true classifications
# Evaluate the accuracy of the model
CrossTable(test_scaled$type,knn30scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale30 <- table(test_scaled$type,knn30scale)
Acc_Scale30 <- round(sum(diag(tabscale30))/sum(tabscale30)*100, digits = 2) # 'digits' sets the number of decimal places
Acc_Scale30 #66.67 %
# Same pattern here: accuracy decreases as k increases.
# Conclusion: the model with k = 1 has the best accuracy of all and classifies
# the animal categories better than the others.
#Implement a KNN model to classify the animals in to categories
#Loading the packages
library(caret)
library(gmodels)
library(class)
#Import the Data
zoo <- read.csv("E:/Data science Excelr/Assigments/KNN/Zoo/Zoo.csv")
attach(zoo)
View(zoo)
summary(zoo)#Get the Structure of the Dataset
#The data is in matrix form so we aint performing any Eda
str(zoo)
#Now We want the Type column to be a Categorical Variable as we have to Predict the Type of the Animal Category,so lets Convert it into Categorial Variable
type <- as.factor(zoo$type)
str(zoo)
#Now the type is converted into factor with 7 level
#Lets Normalize the Data
#Lets Derive a Function for Nomalize the Data
normalise <- function(x)
{
return((x - min(x))/(max(x) - min(x)))
}
#Lets Apply the Function on the Data
zoo_n <- as.data.frame(lapply(zoo[-17], normalise))
zoo_norm <- cbind(zoo_n,type)
str(zoo_norm)
#Lets Create Data Partition for defining Training and Testing Sets
#I will Take 60% of the Total Dataset Randomly for Training set and the rest 40%in the Testing set
part <- createDataPartition(zoo_norm$type, p=.60, list = F)
training <- zoo_norm[part,]
testing <- zoo_norm[-part,]
str(training)
#Creating a loop to find the optimum k value
i=1
k.optm=1
for (i in 1:20) {
knn.mod <-knn(train = training, test = testing,cl = training[,17],k=i)
k.optm[i] <- 100*sum(training[,17] == knn.mod)/NROW(training[,17])
k=i
cat(k,"=",k.optm[i],'\n')
}
plot(k.optm,type = "b")
#As seen in plot values doesnt make much difference still we ll explore
#Now Lets Build the KNN Classifier Model
#For k=1
knn1 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=1)
CrossTable(testing$type,knn1, prop.r = F, prop.c = F, prop.chisq = F)
tab1 <- table(testing$type, knn1)
Acc1 <- round((sum(diag(tab1))/sum(tab1))*100, digits = 2)
Acc1
#For k=1 the Accuracy is 94.87%
#For k=3
knn3 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=3)
CrossTable(testing$type,knn3, prop.r = F, prop.c = F, prop.chisq = F)
tab3 <- table(testing$type, knn3)
Acc3 <- round((sum(diag(tab3))/sum(tab3))*100, digits = 2)
Acc3
#For k=3 the Accuracy is 92.31%
#For k=5
knn5 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=5)
CrossTable(testing$type,knn5, prop.r = F, prop.c = F, prop.chisq = F)
tab5 <- table(testing$type, knn5)
Acc5 <- round((sum(diag(tab5))/sum(tab5))*100, digits = 2)
Acc5
#For k=5 the Accuracy is 89.74%
#For K=7
knn7 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=7)
CrossTable(testing$type,knn7, prop.r = F, prop.c = F, prop.chisq = F)
tab7 <- table(testing$type, knn7)
Acc7 <- round((sum(diag(tab7))/sum(tab7))*100, digits = 2)
Acc7
#For k=7 the Accuracy is 87.14%
#for K=21
knn21 <-knn(train = training[,-17], test = testing[,-17], cl = training$type, k=21)
CrossTable(testing$type,knn21, prop.r = F, prop.c = F, prop.chisq = F)
tab21 <- table(testing$type, knn21)
Acc21 <- round((sum(diag(tab21))/sum(tab21))*100, digits = 2)
Acc21
#For k=21 the Accuracy is 79.49%
#WE saw that K=1 gave more accuracy than others and accuray is kind of decreasing as we increase k
#Lets do it by Scaling the Datapoints using Scale() function.
zoo_scale <- as.data.frame(scale(zoo[,-17]))
type <- as.factor(zoo$type)
zoo_scale_data <- cbind(zoo_scale, type)
str(zoo_scale_data)
#Lets Create Data Partition for defining Training and Testing Sets
#I will Take 60% of the Total Dataset Randomly for Training set and the rest 40%in the Testing set
part1 <- createDataPartition(zoo_norm$type, p=.60, list = F)
train_scaled <- zoo_norm[part1,]
test_scaled <- zoo_norm[-part1,]
j=1
k.optm1=1
for (j in 1:30) {
knn.mod1<-knn(train = train_scaled, test = test_scaled,cl = train_scaled[,17],k=j)
k.optm1[j] <- 100*sum(train_scaled[,17] == knn.mod1)/NROW(train_scaled[,17])
k=j
cat(k,"=",k.optm1[j],'\n')
}
plot(k.optm1,type = "b")
#Lets Build the KNN Classifier Model for Scaled Values
#For K=1
# k-NN on the scaled feature set: column 17 holds the class label ("type"),
# columns 1-16 are the predictors. Accuracy is compared for k = 1, 11, 25, 30.
# NOTE(review): prefer FALSE over F in the CrossTable() calls (F is reassignable).
knn1scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=1) #cl supplies the true class labels of the training rows
#Lets Evaluate the Accuracy of the Model
CrossTable(test_scaled$type,knn1scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale1 <- table(test_scaled$type,knn1scale)
Acc_Scale1 <- round(sum(diag(tabscale1))/sum(tabscale1)*100, digits = 2) # % correct; diagonal of the confusion matrix = correct predictions
Acc_Scale1
# For K = 1 accuracy is 97.44%
#For K=11
knn11scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=11) #cl supplies the true class labels of the training rows
#Lets Evaluate the Accuracy of the Model
CrossTable(test_scaled$type,knn11scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale11 <- table(test_scaled$type,knn11scale)
Acc_Scale11 <- round(sum(diag(tabscale11))/sum(tabscale11)*100, digits = 2) # % correct, 2 decimal places
Acc_Scale11 #92.05%
#For K=25
knn25scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=25) #cl supplies the true class labels of the training rows
#Lets Evaluate the Accuracy of the Model
CrossTable(test_scaled$type,knn25scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale25 <- table(test_scaled$type,knn25scale)
Acc_Scale25 <- round(sum(diag(tabscale25))/sum(tabscale25)*100, digits = 2) # % correct, 2 decimal places
Acc_Scale25 #69.23 %
#For K=30
knn30scale <- knn(train = train_scaled[,-17], test = test_scaled[,-17], cl = train_scaled[,17], k=30) #cl supplies the true class labels of the training rows
#Lets Evaluate the Accuracy of the Model
CrossTable(test_scaled$type,knn30scale, prop.r = F, prop.c = F, prop.chisq = F)
tabscale30 <- table(test_scaled$type,knn30scale)
Acc_Scale30 <- round(sum(diag(tabscale30))/sum(tabscale30)*100, digits = 2) # % correct, 2 decimal places
Acc_Scale30 #66.67 %
# Accuracy decreases monotonically as k grows on this data.
#So we could conclude that model with K = 1 has best accuracy of all and will classify the animals categories best that others
|
library(here)
library(tidyverse)
library(ggdist)

theme_set(theme_minimal())

# Taxonomic update: several warblers formerly in genus Sylvia are now placed
# in Curruca. A single named lookup vector replaces the original chain of
# five ifelse() calls (recode() also handles factor input correctly).
sylvia_to_curruca <- c(
  "Sylvia communis"      = "Curruca communis",
  "Sylvia melanocephala" = "Curruca melanocephala",
  "Sylvia undata"        = "Curruca undata",
  "Sylvia hortensis"     = "Curruca hortensis",
  "Sylvia cantillans"    = "Curruca cantillans"
)

# Posterior draws of seed-dispersal effectiveness (quality component).
qlt <- readRDS(here("data", "SDE_QLTY_total.rds")) %>%
  mutate(bird_sp = recode(bird_sp, !!!sylvia_to_curruca))

# Dot = median; intervals = 66% and 95% credibility intervals.
# Species are ordered by decreasing median recruitment probability.
p_qlty <- qlt %>%
  ggplot() +
  stat_pointinterval(aes(P.recruit, reorder(bird_sp, P.recruit, FUN = median)),
                     .width = c(0.66, 0.95),
                     point_size = 2, colour = "grey30") +
  labs(x = "\nProbability of recruitment per consumed fruit",
       y = "",
       # caption = "Bird species sorted by decreasing median probability (dots). Intervals represent 0.66 and 0.95 credibility intervals"
  ) +
  theme(axis.title.x = element_text(size = rel(1.3)),
        plot.caption = element_text(colour = "grey30"),
        axis.text.y = element_text(face = "italic"))

p_qlty

# qlt %>%
#   ggplot() +
#   stat_slab(aes(P.recruit, bird_sp), normalize = "groups") +
#   coord_cartesian(xlim = c(0, 0.002))

# Pass the plot object explicitly instead of relying on the last plot displayed.
ggsave(here("analysis", "output", "Figures", "SDE_QLTY.pdf"),
       plot = p_qlty, width = 7, height = 9, units = "in")
ggsave(here("analysis", "output", "Figures", "SDE_QLTY.png"),
       plot = p_qlty, width = 7, height = 9, units = "in", dpi = 600, bg = "white")
| /code/figscode/Fig_SDE_QLTY.R | permissive | PJordano-Lab/MS_effectiveness | R | false | false | 1,554 | r |
library(here)
library(tidyverse)
library(ggdist)

theme_set(theme_minimal())

# Taxonomic update: several warblers formerly in genus Sylvia are now placed
# in Curruca. A single named lookup vector replaces the original chain of
# five ifelse() calls (recode() also handles factor input correctly).
sylvia_to_curruca <- c(
  "Sylvia communis"      = "Curruca communis",
  "Sylvia melanocephala" = "Curruca melanocephala",
  "Sylvia undata"        = "Curruca undata",
  "Sylvia hortensis"     = "Curruca hortensis",
  "Sylvia cantillans"    = "Curruca cantillans"
)

# Posterior draws of seed-dispersal effectiveness (quality component).
qlt <- readRDS(here("data", "SDE_QLTY_total.rds")) %>%
  mutate(bird_sp = recode(bird_sp, !!!sylvia_to_curruca))

# Dot = median; intervals = 66% and 95% credibility intervals.
# Species are ordered by decreasing median recruitment probability.
p_qlty <- qlt %>%
  ggplot() +
  stat_pointinterval(aes(P.recruit, reorder(bird_sp, P.recruit, FUN = median)),
                     .width = c(0.66, 0.95),
                     point_size = 2, colour = "grey30") +
  labs(x = "\nProbability of recruitment per consumed fruit",
       y = "",
       # caption = "Bird species sorted by decreasing median probability (dots). Intervals represent 0.66 and 0.95 credibility intervals"
  ) +
  theme(axis.title.x = element_text(size = rel(1.3)),
        plot.caption = element_text(colour = "grey30"),
        axis.text.y = element_text(face = "italic"))

p_qlty

# qlt %>%
#   ggplot() +
#   stat_slab(aes(P.recruit, bird_sp), normalize = "groups") +
#   coord_cartesian(xlim = c(0, 0.002))

# Pass the plot object explicitly instead of relying on the last plot displayed.
ggsave(here("analysis", "output", "Figures", "SDE_QLTY.pdf"),
       plot = p_qlty, width = 7, height = 9, units = "in")
ggsave(here("analysis", "output", "Figures", "SDE_QLTY.png"),
       plot = p_qlty, width = 7, height = 9, units = "in", dpi = 600, bg = "white")
|
set.seed( 48 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# Simulate synthetic GLCM data: 5 groups x 20 images, each image a vector of
# 136 cell counts (the lower triangle of a symmetric 16 x 16 GLCM).
#   s : scale of the bivariate-normal covariance (group overlap grows with s).
#       Groups differ only in the mean offset c (5, 5.5, 6, 6.5, 7).
# Returns a 100 x 136 matrix of rounded counts (rows 1-20 = group 1, etc.).
simu=function(s){
  # Estimate a smoothed cell-probability vector for one group by Monte Carlo:
  # draw mc points from N(mu, sigma), bin them on a 16 x 16 grid, symmetrise,
  # kernel-smooth, then keep the 136 lower-triangular cells.
  # NOTE(review): the default `s=s` is self-referential and would error if
  # relied upon; every call below passes s explicitly, so it is never used.
  prob_glcm<-function(c,s=s,mc=30000){
    mu<-c(2+c,14-c)
    sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
    elip<-rmvnorm(mc,mu,sigma)
    # Vectorised binning, replacing an O(mc * 256) triple loop with the same
    # result. A draw lies in column m iff (m-1) < x < m and in row k iff
    # (k-1) < y < k; its linear cell index is 16-k+1+16*(m-1). The strict
    # inequalities drop draws outside (0,16) or exactly on a grid line,
    # exactly as the original loop did. No RNG is consumed here, so results
    # under set.seed() are unchanged.
    x<-elip[,1]
    y<-elip[,2]
    inside<-(x>0)&(x<16)&(y>0)&(y<16)&(x!=floor(x))&(y!=floor(y))
    m<-ceiling(x[inside])
    k<-ceiling(y[inside])
    cell_count<-tabulate(16-k+1+16*(m-1),nbins=16*16)
    z<-cell_count/sum(cell_count)
    # Map the 256 estimated cells onto the full symmetric 16 x 16 layout.
    z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
    arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
    I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
    image(I)  # side effect: display the (pre-smoothing) probability image
    smooth.I <- image.smooth(I, theta=1)
    # Lower triangle of the smoothed image; row r contributes columns
    # 16, 15, ..., 16-r+1 (same order as the original growing-vector loop,
    # but built in one pass instead of repeated c()).
    den<-unlist(lapply(1:16,function(r) smooth.I$z[r,16:(16-r+1)]))
    prob<-den/sum(den)
    return(prob)
  }
  # One smoothed probability vector per group.
  prob1=prob_glcm(c=5,s=s)
  prob2=prob_glcm(c=5.5,s=s)
  prob3=prob_glcm(c=6,s=s)
  prob4=prob_glcm(c=6.5,s=s)
  prob5=prob_glcm(c=7,s=s)
  glcm=matrix(0,nrow=20*5,ncol=136)
  # 20 images per group: draw a random total count t, then take the rounded
  # expected counts t * prob. The per-row runif() calls are kept one-by-one
  # so the RNG stream matches the original script exactly.
  for (j in 1:20)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob1)
  }
  for (j in 21:40)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob2)
  }
  for (j in 41:60)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob3)
  }
  for (j in 61:80)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob4)
  }
  for (j in 81:100)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob5)
  }
  glcm
}
# Generate one simulated data set (covariance scale s = 15) and set up the
# design arrays for the rounded-Gaussian spatial DP (RGSDP) MCMC sampler.
Z=simu(s=15)
Z_met=Z
T_met=nrow(Z_met)   # number of images (100)
n=ncol(Z_met)       # number of GLCM cells per image (136)
X=apply(Z_met,1,sum)        # per-image total count, used as a covariate
X_met=X
sX_met=(X-mean(X))/sd(X)    # standardised total count
# R[, j, t] = (1, sX_met[t]) for every cell j: intercept + covariate design.
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################               MCMC                  ########################
############################################################################
library(HI)
library(invgamma)
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
# Distance (D) and adjacency (W) matrices for the 16 x 16 spatial grid.
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2   # MCMC iterations and burn-in
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta: jitter around the observed counts; cells with zero counts
# start well below zero (latent-variable representation of rounded zeros).
# NOTE(review): `try` shadows base::try() -- works, but a rename would be safer.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
# Sanity check: the initial latent Y should contain no infinite values.
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Chain storage and initial values for the scalar parameters.
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperparameters: Beta(av, bv) for v; vague inverse-gamma for tau2/sig2;
# vague normal prior for the regression coefficients Beta.
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# Gibbs sampler: each update_* is defined in the sourced R/C++ files; the
# statement order matters (each update conditions on the latest values).
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# Convergence diagnostics: two-sided p-values of Geweke z-scores for each
# post-burn-in chain (small p suggests non-convergence).
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# Posterior mean of Theta over the post-burn-in draws.
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
# Proposed method (HRGSDP): Ward clustering of the posterior-mean Theta,
# with the number of clusters selected by the KL index.
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
# Competing feature-based methods operate on the full symmetric GLCMs:
# expand the 136 lower-triangular columns back to the 256-cell layout.
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Haralick-style texture features, then five competing clusterers with k = 5.
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Confusion tables against the known group labels (5 groups x 20 images).
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
# Summary per method: chi-squared statistic, number of recovered clusters,
# misclassification rate (error_rate() comes from the sourced cal_stat.R).
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
# NOTE(review): "nunber of clusters" is a typo in the output column name.
colnames(xx) = c('pearson.chi.sq', 'nunber of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
| /s=15/simu_48.R | no_license | mguindanigroup/Radiomics-Hierarchical-Rounded-Gaussian-Spatial-Dirichlet-Process | R | false | false | 9,293 | r | set.seed( 48 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# Simulate synthetic GLCM data: 5 groups x 20 images, each image a vector of
# 136 cell counts (the lower triangle of a symmetric 16 x 16 GLCM).
#   s : scale of the bivariate-normal covariance (group overlap grows with s).
#       Groups differ only in the mean offset c (5, 5.5, 6, 6.5, 7).
# Returns a 100 x 136 matrix of rounded counts (rows 1-20 = group 1, etc.).
simu=function(s){
  # Estimate a smoothed cell-probability vector for one group by Monte Carlo:
  # draw mc points from N(mu, sigma), bin them on a 16 x 16 grid, symmetrise,
  # kernel-smooth, then keep the 136 lower-triangular cells.
  # NOTE(review): the default `s=s` is self-referential and would error if
  # relied upon; every call below passes s explicitly, so it is never used.
  prob_glcm<-function(c,s=s,mc=30000){
    mu<-c(2+c,14-c)
    sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
    elip<-rmvnorm(mc,mu,sigma)
    # Vectorised binning, replacing an O(mc * 256) triple loop with the same
    # result. A draw lies in column m iff (m-1) < x < m and in row k iff
    # (k-1) < y < k; its linear cell index is 16-k+1+16*(m-1). The strict
    # inequalities drop draws outside (0,16) or exactly on a grid line,
    # exactly as the original loop did. No RNG is consumed here, so results
    # under set.seed() are unchanged.
    x<-elip[,1]
    y<-elip[,2]
    inside<-(x>0)&(x<16)&(y>0)&(y<16)&(x!=floor(x))&(y!=floor(y))
    m<-ceiling(x[inside])
    k<-ceiling(y[inside])
    cell_count<-tabulate(16-k+1+16*(m-1),nbins=16*16)
    z<-cell_count/sum(cell_count)
    # Map the 256 estimated cells onto the full symmetric 16 x 16 layout.
    z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
    arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
    I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
    image(I)  # side effect: display the (pre-smoothing) probability image
    smooth.I <- image.smooth(I, theta=1)
    # Lower triangle of the smoothed image; row r contributes columns
    # 16, 15, ..., 16-r+1 (same order as the original growing-vector loop,
    # but built in one pass instead of repeated c()).
    den<-unlist(lapply(1:16,function(r) smooth.I$z[r,16:(16-r+1)]))
    prob<-den/sum(den)
    return(prob)
  }
  # One smoothed probability vector per group.
  prob1=prob_glcm(c=5,s=s)
  prob2=prob_glcm(c=5.5,s=s)
  prob3=prob_glcm(c=6,s=s)
  prob4=prob_glcm(c=6.5,s=s)
  prob5=prob_glcm(c=7,s=s)
  glcm=matrix(0,nrow=20*5,ncol=136)
  # 20 images per group: draw a random total count t, then take the rounded
  # expected counts t * prob. The per-row runif() calls are kept one-by-one
  # so the RNG stream matches the original script exactly.
  for (j in 1:20)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob1)
  }
  for (j in 21:40)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob2)
  }
  for (j in 41:60)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob3)
  }
  for (j in 61:80)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob4)
  }
  for (j in 81:100)
  {
    t<-round(runif(1,500,20000),0)
    glcm[j,]=round(t*prob5)
  }
  glcm
}
# Generate one simulated data set (covariance scale s = 15) and set up the
# design arrays for the rounded-Gaussian spatial DP (RGSDP) MCMC sampler.
Z=simu(s=15)
Z_met=Z
T_met=nrow(Z_met)   # number of images (100)
n=ncol(Z_met)       # number of GLCM cells per image (136)
X=apply(Z_met,1,sum)        # per-image total count, used as a covariate
X_met=X
sX_met=(X-mean(X))/sd(X)    # standardised total count
# R[, j, t] = (1, sX_met[t]) for every cell j: intercept + covariate design.
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################               MCMC                  ########################
############################################################################
library(HI)
library(invgamma)
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
# Distance (D) and adjacency (W) matrices for the 16 x 16 spatial grid.
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2   # MCMC iterations and burn-in
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta: jitter around the observed counts; cells with zero counts
# start well below zero (latent-variable representation of rounded zeros).
# NOTE(review): `try` shadows base::try() -- works, but a rename would be safer.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
# Sanity check: the initial latent Y should contain no infinite values.
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Chain storage and initial values for the scalar parameters.
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperparameters: Beta(av, bv) for v; vague inverse-gamma for tau2/sig2;
# vague normal prior for the regression coefficients Beta.
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# Gibbs sampler: each update_* is defined in the sourced R/C++ files; the
# statement order matters (each update conditions on the latest values).
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# Convergence diagnostics: two-sided p-values of Geweke z-scores for each
# post-burn-in chain (small p suggests non-convergence).
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# Posterior mean of Theta over the post-burn-in draws.
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
# Proposed method (HRGSDP): Ward clustering of the posterior-mean Theta,
# with the number of clusters selected by the KL index.
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
# Competing feature-based methods operate on the full symmetric GLCMs:
# expand the 136 lower-triangular columns back to the 256-cell layout.
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Haralick-style texture features, then five competing clusterers with k = 5.
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Confusion tables against the known group labels (5 groups x 20 images).
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
# Summary per method: chi-squared statistic, number of recovered clusters,
# misclassification rate (error_rate() comes from the sourced cal_stat.R).
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
# NOTE(review): "nunber of clusters" is a typo in the output column name.
colnames(xx) = c('pearson.chi.sq', 'nunber of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
# libFuzzer-generated regression input for grattan:::anyOutside():
# bounds a = b = 0L with a mostly-zero integer vector x (first five entries
# are the fuzzer-found values; do.call spreads the list as named arguments).
testlist <- list(a = 0L, b = 0L, x = c(-134219777L, 150931466L, 1691484407L, -524536L, -16252928L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386869-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 543 | r | testlist <- list(a = 0L, b = 0L, x = c(-134219777L, 150931466L, 1691484407L, -524536L, -16252928L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
# Used-car price analysis: load the Craigslist vehicles data and perform a
# first cleaning pass (NA removal, character-to-factor conversion).
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are fragile in shared
# scripts -- consider project-relative paths (e.g. the here package) instead.
rm(list=ls())
library(readxl)
library(maps)
library(mapdata)
library(ggplot2)
library(gbm)
library(dplyr)
library(glmnet)
setwd("C:/Users/prakh/Desktop/Intro to ML/Assignment 1/US_Data")
df = read_excel('vehicles.xlsx')
## Understanding the structure of the dataframe
str(df)
## Understanding the dimension of the dataset
dim(df)
## Count of NA values per column
sapply(df, function (x) sum(is.na(x)))
# There are many columns with NA in it
## Removing all rows with at least one NA
df_cleaned = na.omit(df)
## % of original dataset left after omitting NAs
nrow(df_cleaned)/nrow(df)
# Only 18.51% of the rows are left which have no NAs
## Converting all categorical (character) features into factors
df_cleaned[sapply(df_cleaned, is.character)] <-
  lapply(df_cleaned[sapply(df_cleaned, is.character)],as.factor)
#############
# Outlier / noise removal
#############
# Thresholds kept as named constants so they are easy to tune.
vintage_year = 1980      # anything older is treated as a vintage car
odometer_limit = 350000  # miles; listings beyond this are unreliable
min_price_limit = 1000   # drop token/placeholder prices
max_price_limit = 200000 # drop luxury/data-entry-error listings

# All row filters combined into one dplyr::filter() call. The conjunction is
# equivalent to the original sequence of base-R subsets (the original
# `price >= 0` step is implied by `price >= min_price_limit`), and df_cleaned
# contains no NAs at this point, so filter()'s NA handling cannot differ.
df_cleaned = df_cleaned %>%
  filter(
    condition != 'salvage',              # salvage cars distort prices
    year >= vintage_year,                # exclude possible vintage cars
    year <= 2021,                        # plausible model years only
    manufacturer != 'harley-davidson',   # motorcycles, not cars
    type != 'bus',                       # buses are not resale cars
    odometer <= odometer_limit,
    title_status == 'clean',             # keep only clean-title cars
    price >= min_price_limit,
    price <= max_price_limit
  )
## EDA ---------------------------------------------------------------------
# Number of factor levels per column (0 for non-factor columns)
sapply(df_cleaned, function(x) length(levels(x)))
# Top 10 regions with car listings
top_regions = head(sort(table(df_cleaned$region),decreasing = TRUE), 10)
barplot(top_regions,main='Top 10 Regions by Car Listings',
        xlab='region',ylab='Number of car listings',las=2,col='darkred')
# Top 10 manufacturers with car listings
top_manufacturer = head(sort(table(df_cleaned$manufacturer),decreasing = TRUE), 10)
barplot(top_manufacturer,main='Top 10 manufacturers by Car Listings',
        xlab='manufacturer',ylab='Number of car listings',las=2,col='orange')
# Distribution of Condition of Cars
barplot(table(df_cleaned$condition),main='Distribution of Car Conditions',
        xlab='Car condition', ylab='total count of listings',las=2,col='brown')
# Distribution of Price feature
hist(df_cleaned$price,xlab = 'price of a car', ylab = 'count of cars',main='Distribution of price',col='darkred')
# Distribution of Odometer feature
hist(df_cleaned$odometer,xlab = 'odometer of a car', ylab = 'count of cars',main='Distribution of odometer')
# Distribution of Cylinder feature (title was truncated: "in ca" -> "in cars")
barplot(table(df_cleaned$cylinders),xlab = 'Cylinders in a car', ylab = 'Count',main='Distribution of Cylinders in cars')
# Distribution of year feature
barplot(table(df_cleaned$year),xlab = 'Year of manufacture of a car', ylab = 'Count',main='Distribution of year of manufacture of car')
# Boxplot of Price vs Year
boxplot(price~year,df_cleaned, main="Price vs Registration year",
        xlab="Year of registration", ylab="Price of car")
# Scatterplot of Price vs Odometer. In price ~ odometer, price is on the
# y-axis and odometer on the x-axis; the axis labels were swapped -- fixed.
plot(price~odometer,df_cleaned, main="Price vs Odometer of a car",
     xlab="Odometer reading", ylab="Price of car")
# Geoplot of lat-long: restrict points to a continental-US bounding box.
lat_long_df = data.frame(df_cleaned$lat,df_cleaned$long)
names(lat_long_df) = c('lat','long')
lat_long_df <- data.table::as.data.table(lat_long_df)
lat_long_df = lat_long_df[(lat_long_df$lat>20) & (lat_long_df$lat<65)]
# BUG FIX: the original "lat_long_df$long<-70" parsed as the ASSIGNMENT
# lat_long_df$long <- 70 (clobbering the whole column with 70); the intended
# expression is the comparison long < -70.
lat_long_df = lat_long_df[(lat_long_df$long < -70) & (lat_long_df$long>-150)]
usa <- map_data("state")
ggplot() + geom_polygon(data = usa,
                        aes(x=long, y = lat,
                            group = group),fill = NA,
                        col = 'orange')+theme_bw()+xlim(c(-150,-69)) + geom_point(data = lat_long_df, aes(x = long, y = lat, color = "red", alpha = 0.65), size = 0.5, shape = 1)
## Data Cleaning
# Parse posting dates, derive the weekday, and log-transform price (the
# eventual regression target).
df_cleaned$posting_date = as.Date(df_cleaned$posting_date)
df_cleaned$posting_weekday = weekdays(df_cleaned$posting_date)
df_cleaned$logprice = log(df_cleaned$price)
# Convenience copies of single columns for quick base-graphics plots.
# NOTE(review): odo_df is assigned but not used in the code visible here.
size_df <- df_cleaned$size
type_df <- df_cleaned$type
odo_df <- df_cleaned$odometer
col_df <- df_cleaned$paint_color
#The distribution of size
#Full sized car dominates the market followed by mid-size and compact
plot(size_df)
simple_size = df_cleaned[df_cleaned$size != 'sub-compact',]
ggplot(simple_size, aes(year, logprice, shape=size, colour=size, fill=size)) +
  geom_smooth(method='lm') +
  labs(x='Age of Car', y='Price (logprice)',
       title = 'Old vs. Price',
       subtitle='Different impact the size of a car having',
       caption='Linear Regression Line')
#The distribution of type
#Most number of cars sold: Sedan followed by SUV and Truck
plot(type_df)
simple_type = df_cleaned %>% filter(type == 'SUV' | type == 'sedan' | type == 'truck')
ggplot(simple_type, aes(year, logprice, shape=type, colour=type, fill=type)) +
  geom_smooth(method='lm') +
  labs(x='Age of Car', y='Price (logprice)',
       title = 'Old vs. Price',
       subtitle='Different impact the type of a car having',
       caption='Linear Regression Line')
#Do cylinders matter for the appreciation of a car?
#(Based on the three cylinder counts 4, 6, 8)
simple_cyl = df_cleaned %>% filter(cylinders == '4 cylinders' | cylinders == '6 cylinders' | cylinders == '8 cylinders')
ggplot(simple_cyl, aes(year, logprice, shape=cylinders, colour=cylinders, fill=cylinders)) +
  geom_smooth(method='lm') +
  labs(x='Age of Car', y='Price (logprice)',
       title = 'Old vs. Price',
       subtitle='Different impact the cylinders of a car having',
       caption='Linear Regression Line')
#Which-colored car is sold most frequently?
plot(col_df)
#Rough relationship between year and price
ggplot(data=df_cleaned, aes(x=year, y=price )) +
  geom_point(shape=10, colour='orange') +
  geom_smooth() +
  labs(title='Old vs. Price', subtitle='Minimum Price limit Exists?',
       y='Price of Used car (US Dollar)',
       x='Age of car',
       caption='blue line = trend')
#The price of a used car might be affected by the mileage of the car
ggplot(data=df_cleaned, aes(x=odometer, y=price )) +
  geom_point(shape=10, colour='orange') +
  geom_smooth() +
  labs(title='Mileage vs. Price',
       y='Price of Car (US Dollar)',
       x='Odometer (Mile)',
       caption='blue line = trend')
#Does manufacturer affect the price of a used car?
ggplot(data=df_cleaned, aes(x=manufacturer, y=price )) +
  geom_point(shape=10, colour='orange') +
  labs(title='Price vs. Brand',
       y='Price of Used car (US Dollar)',
       x='Manufacturer')
#The impact of a car's brand on the appreciation of its monetary value
simple_brand = df_cleaned %>% filter(manufacturer == 'ford'|manufacturer == 'chevrolet'|manufacturer == 'toyota')
ggplot(simple_brand, aes(year, logprice, shape=manufacturer, colour=manufacturer, fill=manufacturer)) +
  geom_smooth(method='lm') +
  labs(x='Age of Car', y='Price (logprice)',
       title = 'Old vs. Price',
       subtitle='Impact of Brand on Appreciation')
dim(df_cleaned)
##################################################
##################### Modeling ##################
##################################################
# Variable to get rid of - region, model, title_status, posting_date
drop_vars = c('region', 'model', 'title_status', 'posting_date', 'posting_weekday','lat','long')
car_resale_model = df_cleaned[
, !colnames(df_cleaned) %in% drop_vars
]
df_cleaned = droplevels(df_cleaned)
car_resale_model = droplevels(car_resale_model)
summary(car_resale_model)
dim(car_resale_model)
# getting cleaned data for the modeling
car_resale_model = subset(car_resale_model, select = -c(price))
# splitting into train and test set
set.seed(1)
n_test = 2000
ind = sample(1:dim(car_resale_model)[1], n_test)
train = car_resale_model[-ind,]
test = car_resale_model[ind,]
train_cv_df = data.frame(matrix(nrow=0, ncol=4))
colnames(train_cv_df) = c('model_type', 'hyperparameter', 'avg_rmse_os', 'avg_rmse_is')
##################################################
############## Linear Regression #################
##################################################
model_type = 'Linear Regression'
hyperparameter = 'all_models'
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_lr_list = list()
# 'used'/'set' implement sampling without replacement across folds: each fold
# draws n0 indices from the pool of not-yet-used rows.
used = NULL
set = 1:n_train
set.seed(1)
# for loop for each fold
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
train_i = train[-val,]
test_i = train[val,]
model_i = glm(logprice ~ ., family = gaussian, data=train_i)
prediction_i = predict(model_i, newdata = test_i)
# In-sample RMSE from the training residuals; out-of-sample RMSE on the fold.
rmse_is_i = sqrt(mean((model_i$residuals)^2))
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((test_i$logprice - prediction_i)^2))
out_MSE[j] = rmse_os_i
model_lr_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
##################################################
############## Ridge Regression #################
##################################################
model_type = 'Ridge Regression'
# alpha = 0 selects the ridge penalty in glmnet.
alpha = 0
lambda_list = c(0.001, 0.01, 0.1)
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_rr_list = list()
set.seed(1)
# for loop for each fold
for(lambda in lambda_list){
used = NULL
set = 1:n_train
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
# glmnet needs a numeric design matrix; model.matrix expands factors to dummies.
Xtrain_i = model.matrix(~.,subset(train[-val,], select=-logprice))
Ytrain_i = train[-val,]$logprice
Xtest_i = model.matrix(~.,subset(train[val,], select=-logprice))
Ytest_i = train[val,]$logprice
model_i = glmnet(Xtrain_i, Ytrain_i, alpha = alpha, lambda = lambda)
prediction_i = predict(model_i, newx = Xtest_i)
# Training RMSE recovered from glmnet's deviance bookkeeping:
# residual deviance = (1 - dev.ratio) * nulldev; divide by n for MSE.
rmse_is_i = sqrt(
(1 - model_i$dev.ratio)*model_i$nulldev/dim(Xtrain_i)[1])
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((Ytest_i - prediction_i)^2))
out_MSE[j] = rmse_os_i
# NOTE(review): indexed by fold only, so each lambda overwrites the previous
# lambda's stored models.
model_rr_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = lambda
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
##################################################
############## LASSO Regression #################
##################################################
model_type = 'Lasso Regression'
# alpha = 1 selects the lasso penalty in glmnet.
alpha = 1
lambda_list = c(0.001, 0.01, 0.1)
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_lasso_list = list()
set.seed(1)
# for loop for each fold
for(lambda in lambda_list){
used = NULL
set = 1:n_train
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
Xtrain_i = model.matrix(~.,subset(train[-val,], select=-logprice))
Ytrain_i = train[-val,]$logprice
Xtest_i = model.matrix(~.,subset(train[val,], select=-logprice))
Ytest_i = train[val,]$logprice
model_i = glmnet(Xtrain_i, Ytrain_i, alpha = alpha, lambda = lambda)
prediction_i = predict(model_i, newx = Xtest_i)
rmse_is_i = sqrt(
(1 - model_i$dev.ratio)*model_i$nulldev/dim(Xtrain_i)[1])
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((Ytest_i - prediction_i)^2))
out_MSE[j] = rmse_os_i
model_lasso_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = lambda
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
##################################################
#################### Boosting ####################
##################################################
# K-fold CV over a grid of gbm hyperparameters:
# (i) interaction depth (ii) number of trees (iii) lambda = shrinkage.
model_type = 'Boosting'
depth_list = c(10,20)
ntree_list = c(50,100)
lambda_list = c(.001,.1)
param_grid = expand.grid(depth_list, ntree_list, lambda_list)
param_len = nrow(param_grid)
# Fold setup mirrors the linear/ridge/lasso sections above.
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_boosting_list = list()
set.seed(1)
for(i in 1:param_len){
used = NULL
set = 1:n_train
depth = param_grid[i, 1]
ntree = param_grid[i, 2]
lambda = param_grid[i, 3]
print(c(depth, ntree, lambda))
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
train_i = train[-val,]
test_i = train[val,]
model_i = gbm(logprice~., data=train_i, distribution='gaussian',
interaction.depth=depth,
n.trees=ntree,
shrinkage=lambda)
prediction_i = predict(model_i, newdata=test_i, n.trees=ntree)
# In-sample RMSE from gbm's stored per-iteration training error.
rmse_is_i = sqrt(mean(model_i$train.error))
in_MSE[j] = rmse_is_i
# BUG FIX: the out-of-sample error previously compared against Ytest_i, a
# stale vector left over from the lasso CV loop above. Compare against the
# actual held-out responses of this fold instead.
rmse_os_i = sqrt(mean((test_i$logprice - prediction_i)^2))
out_MSE[j] = rmse_os_i
# NOTE(review): indexed by fold only, so each grid point overwrites the
# previous grid point's stored models; only the last grid point survives.
model_boosting_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = paste(depth, ntree, lambda)
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
avg_rmse_os
train_cv_df
# Variable importance from the first retained boosted model.
variable_imp = summary.gbm(model_boosting_list[[1]])
variable_imp$rel.inf = as.numeric(variable_imp$rel.inf)
# BUG FIX: the original sorted only the rel.inf column in isolation, which
# decouples it from the 'var' column whenever summary.gbm does not already
# return rows sorted. Reorder whole rows so var/rel.inf stay paired.
variable_imp <- variable_imp[order(variable_imp$rel.inf, decreasing = TRUE), ]
variable_imp
p = ggplot(variable_imp,aes(reorder(var,rel.inf),rel.inf))+geom_bar(stat='identity',fill='steelblue')+ylab('value')+xlab('feature')
p+coord_flip()
##################################################
############## Out of sample testing #############
##################################################
# pvalue are not reported as they are not relevant
# final model selection - lasso regression with lambda= 0.001
set.seed(1)
alpha = 1
lambda = 0.001
Xtrain = model.matrix(~.,subset(train, select=-logprice))
Ytrain = train$logprice
Xtest = model.matrix(~.,subset(test, select=-logprice))
Ytest = test$logprice
model = glmnet(Xtrain, Ytrain, alpha = alpha, lambda = lambda)
prediction = predict(model, newx = Xtest)
# In-sample RMSE from glmnet deviance: residual deviance =
# (1 - dev.ratio) * nulldev; divide by n for MSE.
rmse_is = sqrt(
(1 - model$dev.ratio)*model$nulldev/dim(Xtrain)[1])
# RMSE back-transformed to the original dollar scale.
rmse_os_antilog = sqrt(mean((exp(test$logprice) - exp(prediction))^2))
rmse_os_antilog
rmse_os = sqrt(mean((Ytest - prediction)^2))
print(c(rmse_is, rmse_os))
c(model$beta, model$a0)
summary(model)
plot(prediction, Ytest, main='Actuals vs Prediction', xlab='Predictions', ylab='Actuals')
abline(0, 1, col='red')
residuals = Ytest - prediction
plot(residuals, main='residual plot')
abline(h = 0, col='red')
| /Used_Car_Prices_Prediction.R | no_license | prakharb13/Used-Car-Prices-Prediction-in-R | R | false | false | 16,215 | r | rm(list=ls())
# ---- Setup: libraries, data import, NA handling, outlier removal ----
library(readxl)
library(maps)
library(mapdata)
library(ggplot2)
library(gbm)
library(dplyr)
library(glmnet)
# NOTE(review): hard-coded absolute path; must be adjusted before running on
# another machine.
setwd("C:/Users/prakh/Desktop/Intro to ML/Assignment 1/US_Data")
df = read_excel('vehicles.xlsx')
## Understanding the structure of the dataframe
str(df)
## Understanding the dimension of the dataset
dim(df)
## Total Number of Rows with NA
sapply(df, function (x) sum(is.na(x)))
# There are many columns with NA in it
## Removing all rows with NA in the dataset
df_cleaned = na.omit(df)
## % of original dataset left after omitting NAs
nrow(df_cleaned)/nrow(df)
# Only 18.51% of the rows are left which have no NAs
## Converting all categorical features into factors
df_cleaned[sapply(df_cleaned, is.character)] <-
lapply(df_cleaned[sapply(df_cleaned, is.character)],as.factor)
#############
# outlier removal (each filter below drops rows in place)
# price > 0
df_cleaned = df_cleaned[df_cleaned$price >= 0,]
# removing car in salvage category
df_cleaned = df_cleaned[
df_cleaned$condition != 'salvage',
]
# remove car which are possible vintage (keep model years 1980..2021 only)
vintage_year = 1980
df_cleaned = df_cleaned[
df_cleaned$year >= vintage_year,
]
df_cleaned = df_cleaned[
df_cleaned$year <= 2021,
]
# getting rid of harley-davidson
df_cleaned = df_cleaned[
df_cleaned$manufacturer != 'harley-davidson',
]
# getting rid of bus
df_cleaned = df_cleaned[
df_cleaned$type != 'bus',
]
# getting rid of x mile cover
odometer_limit = 350000
df_cleaned = df_cleaned[
df_cleaned$odometer <= odometer_limit,
]
# consider car which have clean title
df_cleaned = df_cleaned[
df_cleaned$title_status == 'clean',
]
# putting a reasonable limit on price (1,000 .. 200,000 USD)
min_price_limit = 1000
df_cleaned = df_cleaned[
df_cleaned$price >= min_price_limit,
]
max_price_limit = 200000
df_cleaned = df_cleaned[
df_cleaned$price <= max_price_limit,
]
## EDA
# Checking levels each column of the dataset has
sapply(df_cleaned, function(x) length(levels(x)))
# Top 10 regions with car listings
top_regions = head(sort(table(df_cleaned$region),decreasing = TRUE), 10)
barplot(top_regions,main='Top 10 Regions by Car Listings',
xlab='region',ylab='Number of car listings',las=2,col='darkred')
# Top 10 manufacturers with car listings
top_manufacturer = head(sort(table(df_cleaned$manufacturer),decreasing = TRUE), 10)
barplot(top_manufacturer,main='Top 10 manufacturers by Car Listings',
xlab='manufacturer',ylab='Number of car listings',las=2,col='orange')
# Distribution of Condition of Cars
barplot(table(df_cleaned$condition),main='Distribution of Car Conditions',
xlab='Car condition', ylab='total count of listings',las=2,col='brown')
# Distribution of Price feature
hist(df_cleaned$price,xlab = 'price of a car', ylab = 'count of cars',main='Distribution of price',col='darkred')
# Distribution of Odometer feature
hist(df_cleaned$odometer,xlab = 'odometer of a car', ylab = 'count of cars',main='Distribution of odometer')
# Distribution of Cylinder feature
# FIX: completed the truncated plot title ('...in ca' -> '...in car').
barplot(table(df_cleaned$cylinders),xlab = 'Cylinders in a car', ylab = 'Count',main='Distribution of Cylinders in car')
# Distribution of year feature
barplot(table(df_cleaned$year),xlab = 'Year of manufacture of a car', ylab = 'Count',main='Distribution of year of manufacture of car')
# Boxplot of Price vs Year
boxplot(price~year,df_cleaned, main="Price vs Registration year",
xlab="Year of registration", ylab="Price of car")
# Scatterplot of Price vs Odometer
# BUG FIX: in plot(price ~ odometer) the x axis is odometer and the y axis is
# price; the original axis labels were swapped.
plot(price~odometer,df_cleaned, main="Price vs Odometer of a car",
xlab="Odometer reading", ylab="Price of car")
# Geoplot of lat-long: listing locations over US state outlines.
lat_long_df = data.frame(df_cleaned$lat,df_cleaned$long)
names(lat_long_df) = c('lat','long')
lat_long_df <- data.table::as.data.table(lat_long_df)
# Keep points inside a rough continental-US bounding box.
lat_long_df = lat_long_df[(lat_long_df$lat>20) & (lat_long_df$lat<65)]
# BUG FIX: the original read '(lat_long_df$long<-70)', which R tokenises as an
# assignment (long <- 70), clobbering the whole column instead of filtering.
# The intended comparison is 'long < -70'.
lat_long_df = lat_long_df[(lat_long_df$long < -70) & (lat_long_df$long>-150)]
usa <- map_data("state")
ggplot() + geom_polygon(data = usa,
aes(x=long, y = lat,
group = group),fill = NA,
col = 'orange')+theme_bw()+xlim(c(-150,-69)) + geom_point(data = lat_long_df, aes(x = long, y = lat, color = "red", alpha = 0.65), size = 0.5, shape = 1)
## Data Cleaning
# clean dates
df_cleaned$posting_date = as.Date(df_cleaned$posting_date)
df_cleaned$posting_weekday = weekdays(df_cleaned$posting_date)
# Model log(price); tames the heavy right tail seen in the price histogram.
df_cleaned$logprice = log(df_cleaned$price)
size_df <- df_cleaned$size
type_df <- df_cleaned$type
odo_df <- df_cleaned$odometer
col_df <- df_cleaned$paint_color
#The distribution of size
#Full sized car dominates the market followed by mid-size and compact
plot(size_df)
simple_size = df_cleaned[df_cleaned$size != 'sub-compact',]
ggplot(simple_size, aes(year, logprice, shape=size, colour=size, fill=size)) +
geom_smooth(method='lm') +
labs(x='Age of Car', y='Price (logprice)',
title = 'Old vs. Price',
subtitle='Different impact the size of a car having',
caption='Linear Regression Line')
#The distribution of type
#Most number of cars sold: Sedan followed by SUV and Truck
plot(type_df)
# Restrict to the three most common body types before fitting per-type trends.
simple_type = df_cleaned %>% filter(type == 'SUV' | type == 'sedan' | type == 'truck')
ggplot(simple_type, aes(year, logprice, shape=type, colour=type, fill=type)) +
geom_smooth(method='lm') +
labs(x='Age of Car', y='Price (logprice)',
title = 'Old vs. Price',
subtitle='Different impact the type of a car having',
caption='Linear Regression Line')
#Does cylinders matter for the appreciation of a car?
#(Based on the three cylinders 4, 6, 8)
simple_cyl = df_cleaned %>% filter(cylinders == '4 cylinders' | cylinders == '6 cylinders' | cylinders == '8 cylinders')
ggplot(simple_cyl, aes(year, logprice, shape=cylinders, colour=cylinders, fill=cylinders)) +
geom_smooth(method='lm') +
labs(x='Age of Car', y='Price (logprice)',
title = 'Old vs. Price',
subtitle='Different impact the cylinders of a car having',
caption='Linear Regression Line')
#Which-colored car is sold most frequently?
plot(col_df)
#Rough relationship between year and price
ggplot(data=df_cleaned, aes(x=year, y=price )) +
geom_point(shape=10, colour='orange') +
geom_smooth() +
labs(title='Old vs. Price', subtitle='Minimum Price limit Exists?',
y='Price of Used car (US Dollar)',
x='Age of car',
caption='blue line = trend')
#The price of used car might be affect by the performance of the car
ggplot(data=df_cleaned, aes(x=odometer, y=price )) +
geom_point(shape=10, colour='orange') +
geom_smooth() +
labs(title='Mileage vs. Price',
y='Price of Car (US Dollar)',
x='Odometer (Mile)',
caption='blue line = trend')
#Does manufacturer affect the price of used car?
ggplot(data=df_cleaned, aes(x=manufacturer, y=price )) +
geom_point(shape=10, colour='orange') +
labs(title='Price vs. Brand',
y='Price of Used car (US Dollar)',
x='Manufacturer')
#The impact of a car's brand on its apprecation of monetary car-value
simple_brand = df_cleaned %>% filter(manufacturer == 'ford'|manufacturer == 'chevrolet'|manufacturer == 'toyota')
ggplot(simple_brand, aes(year, logprice, shape=manufacturer, colour=manufacturer, fill=manufacturer)) +
geom_smooth(method='lm') +
labs(x='Age of Car', y='Price (logprice)',
title = 'Old vs. Price',
subtitle='Impact of Brand on Appreciation')
dim(df_cleaned)
##################################################
##################### Modeling ##################
##################################################
# Variable to get rid of - region, model, title_status, posting_date
drop_vars = c('region', 'model', 'title_status', 'posting_date', 'posting_weekday','lat','long')
car_resale_model = df_cleaned[
, !colnames(df_cleaned) %in% drop_vars
]
# Drop factor levels emptied by the earlier outlier filtering.
df_cleaned = droplevels(df_cleaned)
car_resale_model = droplevels(car_resale_model)
summary(car_resale_model)
dim(car_resale_model)
# getting cleaned data for the modeling
# Drop raw price: the models predict logprice; keeping both leaks the target.
car_resale_model = subset(car_resale_model, select = -c(price))
# splitting into train and test set (fixed seed for reproducibility)
set.seed(1)
n_test = 2000
ind = sample(1:dim(car_resale_model)[1], n_test)
train = car_resale_model[-ind,]
test = car_resale_model[ind,]
# Accumulator for cross-validation results across all model families below.
train_cv_df = data.frame(matrix(nrow=0, ncol=4))
colnames(train_cv_df) = c('model_type', 'hyperparameter', 'avg_rmse_os', 'avg_rmse_is')
##################################################
############## Linear Regression #################
##################################################
model_type = 'Linear Regression'
hyperparameter = 'all_models'
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_lr_list = list()
# 'used'/'set' implement sampling without replacement across folds: each fold
# draws n0 indices from the pool of not-yet-used rows.
used = NULL
set = 1:n_train
set.seed(1)
# for loop for each fold
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
train_i = train[-val,]
test_i = train[val,]
model_i = glm(logprice ~ ., family = gaussian, data=train_i)
prediction_i = predict(model_i, newdata = test_i)
# In-sample RMSE from the training residuals; out-of-sample RMSE on the fold.
rmse_is_i = sqrt(mean((model_i$residuals)^2))
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((test_i$logprice - prediction_i)^2))
out_MSE[j] = rmse_os_i
model_lr_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
##################################################
############## Ridge Regression #################
##################################################
model_type = 'Ridge Regression'
# alpha = 0 selects the ridge penalty in glmnet.
alpha = 0
lambda_list = c(0.001, 0.01, 0.1)
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_rr_list = list()
set.seed(1)
# for loop for each fold
for(lambda in lambda_list){
used = NULL
set = 1:n_train
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
# glmnet needs a numeric design matrix; model.matrix expands factors to dummies.
Xtrain_i = model.matrix(~.,subset(train[-val,], select=-logprice))
Ytrain_i = train[-val,]$logprice
Xtest_i = model.matrix(~.,subset(train[val,], select=-logprice))
Ytest_i = train[val,]$logprice
model_i = glmnet(Xtrain_i, Ytrain_i, alpha = alpha, lambda = lambda)
prediction_i = predict(model_i, newx = Xtest_i)
# Training RMSE recovered from glmnet's deviance bookkeeping:
# residual deviance = (1 - dev.ratio) * nulldev; divide by n for MSE.
rmse_is_i = sqrt(
(1 - model_i$dev.ratio)*model_i$nulldev/dim(Xtrain_i)[1])
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((Ytest_i - prediction_i)^2))
out_MSE[j] = rmse_os_i
# NOTE(review): indexed by fold only, so each lambda overwrites the previous
# lambda's stored models.
model_rr_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = lambda
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
##################################################
############## LASSO Regression #################
##################################################
model_type = 'Lasso Regression'
# alpha = 1 selects the lasso penalty in glmnet.
alpha = 1
lambda_list = c(0.001, 0.01, 0.1)
# starting K-fold validation for Linear regression
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_lasso_list = list()
set.seed(1)
# for loop for each fold
for(lambda in lambda_list){
used = NULL
set = 1:n_train
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
Xtrain_i = model.matrix(~.,subset(train[-val,], select=-logprice))
Ytrain_i = train[-val,]$logprice
Xtest_i = model.matrix(~.,subset(train[val,], select=-logprice))
Ytest_i = train[val,]$logprice
model_i = glmnet(Xtrain_i, Ytrain_i, alpha = alpha, lambda = lambda)
prediction_i = predict(model_i, newx = Xtest_i)
rmse_is_i = sqrt(
(1 - model_i$dev.ratio)*model_i$nulldev/dim(Xtrain_i)[1])
in_MSE[j] = rmse_is_i
rmse_os_i = sqrt(mean((Ytest_i - prediction_i)^2))
out_MSE[j] = rmse_os_i
model_lasso_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = lambda
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
##################################################
#################### Boosting ####################
##################################################
# K-fold CV over a grid of gbm hyperparameters:
# (i) interaction depth (ii) number of trees (iii) lambda = shrinkage.
model_type = 'Boosting'
depth_list = c(10,20)
ntree_list = c(50,100)
lambda_list = c(.001,.1)
param_grid = expand.grid(depth_list, ntree_list, lambda_list)
param_len = nrow(param_grid)
# Fold setup mirrors the linear/ridge/lasso sections above.
k_folds = 5
n_train = dim(train)[1]
n0 = round(n_train/k_folds, 0)
out_MSE = matrix(0, k_folds)
in_MSE = matrix(0, k_folds)
model_boosting_list = list()
set.seed(1)
for(i in 1:param_len){
used = NULL
set = 1:n_train
depth = param_grid[i, 1]
ntree = param_grid[i, 2]
lambda = param_grid[i, 3]
print(c(depth, ntree, lambda))
for(j in 1:k_folds){
if(n0<length(set)){val = sample(set,n0)}
if(n0>=length(set)){val=set}
train_i = train[-val,]
test_i = train[val,]
model_i = gbm(logprice~., data=train_i, distribution='gaussian',
interaction.depth=depth,
n.trees=ntree,
shrinkage=lambda)
prediction_i = predict(model_i, newdata=test_i, n.trees=ntree)
# In-sample RMSE from gbm's stored per-iteration training error.
rmse_is_i = sqrt(mean(model_i$train.error))
in_MSE[j] = rmse_is_i
# BUG FIX: the out-of-sample error previously compared against Ytest_i, a
# stale vector left over from the lasso CV loop above. Compare against the
# actual held-out responses of this fold instead.
rmse_os_i = sqrt(mean((test_i$logprice - prediction_i)^2))
out_MSE[j] = rmse_os_i
# NOTE(review): indexed by fold only, so each grid point overwrites the
# previous grid point's stored models; only the last grid point survives.
model_boosting_list[[j]] = model_i
used = union(used,val)
set = (1:n_train)[-used]
}
avg_rmse_os = mean(out_MSE)
avg_rmse_is = mean(in_MSE)
hyperparameter = paste(depth, ntree, lambda)
train_cv_df = rbind(train_cv_df, cbind(model_type, hyperparameter, avg_rmse_os, avg_rmse_is))
}
avg_rmse_os
train_cv_df
# Variable importance from the first retained boosted model.
variable_imp = summary.gbm(model_boosting_list[[1]])
variable_imp$rel.inf = as.numeric(variable_imp$rel.inf)
# BUG FIX: the original sorted only the rel.inf column in isolation, which
# decouples it from the 'var' column whenever summary.gbm does not already
# return rows sorted. Reorder whole rows so var/rel.inf stay paired.
variable_imp <- variable_imp[order(variable_imp$rel.inf, decreasing = TRUE), ]
variable_imp
p = ggplot(variable_imp,aes(reorder(var,rel.inf),rel.inf))+geom_bar(stat='identity',fill='steelblue')+ylab('value')+xlab('feature')
p+coord_flip()
##################################################
############## Out of sample testing #############
##################################################
# pvalue are not reported as they are not relevant
# final model selection - lasso regression with lambda= 0.001
set.seed(1)
alpha = 1
lambda = 0.001
Xtrain = model.matrix(~.,subset(train, select=-logprice))
Ytrain = train$logprice
Xtest = model.matrix(~.,subset(test, select=-logprice))
Ytest = test$logprice
model = glmnet(Xtrain, Ytrain, alpha = alpha, lambda = lambda)
prediction = predict(model, newx = Xtest)
# In-sample RMSE from glmnet deviance: residual deviance =
# (1 - dev.ratio) * nulldev; divide by n for MSE.
rmse_is = sqrt(
(1 - model$dev.ratio)*model$nulldev/dim(Xtrain)[1])
# RMSE back-transformed to the original dollar scale.
rmse_os_antilog = sqrt(mean((exp(test$logprice) - exp(prediction))^2))
rmse_os_antilog
rmse_os = sqrt(mean((Ytest - prediction)^2))
print(c(rmse_is, rmse_os))
c(model$beta, model$a0)
summary(model)
plot(prediction, Ytest, main='Actuals vs Prediction', xlab='Predictions', ylab='Actuals')
abline(0, 1, col='red')
residuals = Ytest - prediction
plot(residuals, main='residual plot')
abline(h = 0, col='red')
|
# Investment case study: merge company master data with funding rounds and
# summarise venture funding by country.
# FIX(review): this script uses dplyr verbs (count, distinct, group_by,
# summarise, arrange, top_n, desc) but never loaded the package; load it
# explicitly so the script runs standalone.
library(dplyr)
companies <- read.delim("companies.txt", header = TRUE, na.strings=c("","NA") )
rounds2 <- read.csv("rounds2.csv", stringsAsFactors = FALSE, na.strings=c("","NA"))
#Table 1.1: Understand the Data Set
#1. How many unique companies are present in rounds2?
#Method1: Using Dplyr:
count(distinct(rounds2, company_permalink))
#Method2: Without using any external packages:
length(unique(rounds2$company_permalink))
#2. How many unique companies are present in the companies file?
# Understanding: the permalink column in companies is the primary key.
# Therefore number of rows equals the unique companies
#Method1: Using Dplyr:
count(distinct(companies, name))
#Method2: Without using any external packages:
length(unique(companies$name))
#5.Merge the two data frames so that all variables (columns)
#in the companies frame are added to the rounds2 data frame.
#Name the merged frame master_frame.
#How many observations are present in master_frame ?
names(companies)[names(companies) == "permalink"] <- "company_permalink"
# Permalinks differ only by case between the two files; normalise before merging.
companies$company_permalink <- tolower(companies$company_permalink)
rounds2$company_permalink <- tolower(rounds2$company_permalink)
master_frame <- merge(rounds2, companies, by="company_permalink")
#Table 2.1: Average Values of Investments for Each of these Funding Types
funding_type_groups <- group_by(master_frame, funding_round_type)
summarise(funding_type_groups, mean(raised_amount_usd, na.rm = TRUE))
#Table 3.1:
venture <- subset(master_frame, funding_round_type == "venture")
venture_country_groups <- group_by(venture, country_code)
vcg_total_funding <- summarise(venture_country_groups, sum(raised_amount_usd, na.rm = TRUE))
names(vcg_total_funding) <- c("country_code","total_funding_amt")
vcg_total_funding <- na.omit(vcg_total_funding)
# Top 9 countries by total venture funding, highest first.
arrange(top_n(vcg_total_funding, 9, total_funding_amt), desc(total_funding_amt))
| /InvestmentCaseStudy.r | no_license | Bhumit-t/InvestmentCaseStuday | R | false | false | 1,919 | r | companies <- read.delim("companies.txt", header = TRUE, na.strings=c("","NA") )
rounds2 <- read.csv("rounds2.csv", stringsAsFactors = FALSE, na.strings=c("","NA"))
#Table 1.1: Understand the Data Set
#1. How many unique companies are present in rounds2?
#Method1: Using Dplyr:
count(distinct(rounds2, company_permalink))
#Method2: Without using any external packages:
length(unique(rounds2$company_permalink))
#2. How many unique companies are present in the companies file?
# Understanding is permalink link coulmn in companes is the primary key.
# Therefore number of rows equals the unique companies
#Method1: Using Dplyr:
count(distinct(companies, name))
#Method2: Without using any external packages:
length(unique(companies$name))
#5.Merge the two data frames so that all variables (columns)
#in the companies frame are added to the rounds2 data frame.
#Name the merged frame master_frame.
#How many observations are present in master_frame ?
names(companies)[names(companies) == "permalink"] <- "company_permalink"
companies$company_permalink <- tolower(companies$company_permalink)
rounds2$company_permalink <- tolower(rounds2$company_permalink)
master_frame <- merge(rounds2, companies, by="company_permalink")
#Table 2.1: Average Values of Investments for Each of these Funding Types
funding_type_groups <- group_by(master_frame, funding_round_type)
summarise(funding_type_groups, mean(raised_amount_usd, na.rm = T))
#Table 3.1:
venture <- subset(master_frame, funding_round_type == "venture")
venture_country_groups <- group_by(venture, country_code)
vcg_total_funding <- summarise(venture_country_groups, sum(raised_amount_usd, na.rm = T))
names(vcg_total_funding) <- c("country_code","total_funding_amt")
vcg_total_funding <- na.omit(vcg_total_funding)
arrange(top_n(vcg_total_funding, 9, total_funding_amt), desc(total_funding_amt))
|
# Download and unzip the household power consumption dataset, then plot the
# Global Active Power time series for 2007-02-01/02 to plot2.png.
fn <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fl <- tempfile()
download.file(fn, fl) # Download the file
file <- unzip(fl) # Unzip it; NOTE(review): 'file' shadows base::file here
unlink(fl)
# Read the file
# Missing values are coded as "?" in the raw data; as.numeric turns them into
# NA (with a coercion warning).
pwr <- read.table(file, header = TRUE, stringsAsFactors = FALSE, sep = ";")
pwr$Global_active_power <- as.numeric(pwr$Global_active_power)
pwr$Date <- as.Date(pwr$Date, "%d/%m/%Y")
# Keep only the two days of interest.
x <- subset(pwr, pwr$Date == "2007-02-01" | pwr$Date == "2007-02-02",
select=c(Global_active_power, Date, Time))
# Combine Date and Time into a single POSIXct timestamp for the x axis.
x$DT <- as.POSIXct(strptime(paste(x$Date, x$Time), format = "%Y-%m-%d %H:%M:%S"))
# Render to a 480x480 PNG; the device is closed by dev.off() just below.
png(filename = "plot2.png", width = 480, height = 480)
plot(x$DT, x$Global_active_power, ylab = "Global Active Power (kilowatts)",
type="l", xlab="")
dev.off() | /Plot2.R | no_license | Timlri/ExData_Plotting1 | R | false | false | 782 | r |
# Plot 2: line plot of Global Active Power for 2007-02-01 and 2007-02-02
# (household power consumption data set).
fn <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fl <- tempfile()
download.file(fn, fl) # Download the file
file <- unzip(fl) # Unzip it
unlink(fl)
# Read the file
# NOTE(review): the raw data encodes missing values as "?"; as.numeric()
# below therefore coerces them to NA with a warning -- consider adding
# na.strings = "?" to read.table().
pwr <- read.table(file, header = TRUE, stringsAsFactors = FALSE, sep = ";")
pwr$Global_active_power <- as.numeric(pwr$Global_active_power)
pwr$Date <- as.Date(pwr$Date, "%d/%m/%Y")
# Keep only the two target days
x <- subset(pwr, pwr$Date == "2007-02-01" | pwr$Date == "2007-02-02",
            select=c(Global_active_power, Date, Time))
# Combine the separate Date and Time columns into one POSIXct timestamp
x$DT <- as.POSIXct(strptime(paste(x$Date, x$Time), format = "%Y-%m-%d %H:%M:%S"))
png(filename = "plot2.png", width = 480, height = 480)
plot(x$DT, x$Global_active_power, ylab = "Global Active Power (kilowatts)",
     type="l", xlab="")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenericDefinition.R
\name{coordToPixel}
\alias{coordToPixel}
\title{coordToPixel
coordToPixel translates xy coordinates from the locator() function
into cell coordinates of the image function. Following ToF-SIMS image
convention, the origin is in the upper-left corner.}
\usage{
coordToPixel(object, xy)
}
\arguments{
\item{object}{of class MassImage}
\item{xy}{numeric vector with x/y locator coordinate}
}
\value{
xy coordinate of MassImage pixels
}
\description{
coordToPixel
coordToPixel translates xy coordinates from the locator() function
into cell coordinates of the image function. Following ToF-SIMS image
convention, the origin is in the upper-left corner.
}
| /man/coordToPixel.Rd | no_license | lorenzgerber/tofsims | R | false | true | 720 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenericDefinition.R
\name{coordToPixel}
\alias{coordToPixel}
\title{coordToPixel
coordToPixel translates xy coordinates from the locator() function
to cell coordinates from the image function. Origo is according to
ToF-SIMS images the upper left corner.}
\usage{
coordToPixel(object, xy)
}
\arguments{
\item{object}{of class MassImage}
\item{xy}{numeric vector with x/y locator coordinate}
}
\value{
xy coordinate of MassImage pixels
}
\description{
coordToPixel
coordToPixel translates xy coordinates from the locator() function
to cell coordinates from the image function. Origo is according to
ToF-SIMS images the upper left corner.
}
|
#' Build home page
#'
#' First looks for `index.Rmd` or `README.Rmd`, then
#' `index.md` or `README.md`. If none are found, falls back to the
#' description field in `DESCRIPTION`.
#'
#' @section YAML config:
#' To tweak the home page, you need a section called `home`.
#'
#' The sidebar links are automatically generated by inspecting the
#' `URL` and `BugReports` fields of the `DESCRIPTION`.
#' You can add additional links with a subsection called `links`,
#' which should contain a list of `text` + `href` elements:
#'
#' \preformatted{
#' home:
#' links:
#' - text: Link text
#' href: http://website.com
#' }
#'
#' The "developers" list is populated by the maintainer ("cre"), authors
#' ("aut"), and funder ("fnd").
#'
#' @section Badges:
#' Status badges are displayed in the sidebar under the section "Dev status".
#' This section is automatically populated if the first paragraph of the
#' homepage consists solely of status badges as linked images.
#'
#' @inheritParams build_articles
#' @param preview If `TRUE`, will preview freshly generated home page
#' @export
build_home <- function(pkg = ".", path = "docs", depth = 0L, encoding = "UTF-8",
                       preview = TRUE) {
  rstudio_save_all()
  old <- set_pkgdown_env("true")
  on.exit(set_pkgdown_env(old), add = TRUE)
  pkg <- as_pkgdown(pkg)
  path <- rel_path(path, pkg$path)
  data <- data_home(pkg)
  data$opengraph <- list(description = pkg$desc$get("Description")[[1]])
  rule("Building home")
  scoped_package_context(pkg$package, pkg$topic_index, pkg$article_index)
  scoped_file_context(depth = depth)
  # Copy license file, if present
  license_path <- file.path(pkg$path, "LICENSE")
  if (file.exists(license_path)) {
    render_page(pkg, "license",
      data = list(
        pagetitle = "License",
        license = read_file(license_path)
      ),
      path = file.path(path, "LICENSE.html")
    )
  }
  # Build authors page (a CITATION file, when present, takes precedence)
  if (has_citation(pkg$path)) {
    build_citation_authors(pkg, path = path, depth = depth)
  } else {
    build_authors(pkg, path = path, depth = depth)
  }
  if (is.null(data$path)) {
    # No index/README file found: fall back to the Description field
    data$index <- linkify(pkg$desc$get("Description")[[1]])
    render_page(pkg, "home", data, out_path(path, "index.html"), depth = depth)
  } else {
    file_name <- tools::file_path_sans_ext(basename(data$path))
    file_ext <- tools::file_ext(data$path)
    if (file_ext == "md") {
      data$index <- markdown(path = data$path, depth = 0L)
      render_page(pkg, "home", data, out_path(path, "index.html"), depth = depth)
    } else if (file_ext == "Rmd") {
      if (identical(file_name, "README")) {
        # Render once (in a clean child process) so that README.md is kept
        # in sync with README.Rmd
        cat_line("Updating ", file_name, ".md")
        callr::r_safe(
          function(input, encoding) {
            rmarkdown::render(
              input,
              output_format = "github_document",
              output_options = list(html_preview = FALSE),
              quiet = TRUE,
              encoding = encoding,
              envir = globalenv()
            )
          },
          args = list(
            input = data$path,
            encoding = encoding
          )
        )
      }
      # Render the Rmd from a temporary copy inside the output directory
      input <- file.path(path, basename(data$path))
      file.copy(data$path, input)
      # BUG FIX: add = TRUE is required here -- without it this second
      # on.exit() call silently replaced the handler registered above that
      # restores the pkgdown environment variable.
      on.exit(unlink(input), add = TRUE)
      render_rmd(pkg, input, "index.html",
        depth = depth,
        data = data,
        toc = FALSE,
        strip_header = TRUE,
        encoding = encoding
      )
    }
  }
  update_homepage_html(
    out_path(path, "index.html"),
    isTRUE(pkg$meta$home$strip_header)
  )
  if (preview) {
    utils::browseURL(file.path(path, "index.html"))
  }
  invisible()
}
# Post-process a rendered homepage in place: read it, apply pkgdown's HTML
# tweaks (optionally stripping the leading header), and write it back.
# Returns the path. Note: tweak_homepage_html() presumably mutates `html` in
# place (xml2 documents have reference semantics) -- its return value is
# deliberately ignored.
update_homepage_html <- function(path, strip_header = FALSE) {
  html <- xml2::read_html(path, encoding = "UTF-8")
  tweak_homepage_html(html, strip_header = strip_header)
  xml2::write_html(html, path, format = FALSE)
  path
}
# Assemble the template data for the home page: the page title (from the
# DESCRIPTION Title field), the rendered sidebar, and the path of the file
# used as the page body (index/README, Rmd preferred over md; NULL if none).
data_home <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  homepage <- find_first_existing(
    pkg$path,
    c("index.Rmd", "README.Rmd", "index.md", "README.md")
  )
  print_yaml(list(
    pagetitle = pkg$desc$get("Title")[[1]],
    sidebar = data_home_sidebar(pkg),
    path = homepage
  ))
}
# Build the HTML for the home page sidebar. A sidebar given explicitly in the
# YAML config (`home: sidebar:`) wins outright; otherwise the links, license,
# citation and authors sections are concatenated.
data_home_sidebar <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  if (!is.null(pkg$meta$home$sidebar))
    return(pkg$meta$home$sidebar)
  # Each section helper returns a length-1 HTML string, or character(0) when
  # it has nothing to contribute. Collect with c() and collapse with paste()
  # so that empty sections are simply dropped: the previous elementwise
  # paste0(..., collapse = "\n") yielded character(0) for the *entire*
  # sidebar as soon as any one section was empty (zero-length recycling).
  sections <- c(
    data_home_sidebar_links(pkg),
    data_home_sidebar_license(pkg),
    data_home_sidebar_citation(pkg),
    data_home_sidebar_authors(pkg)
  )
  paste(sections, collapse = "")
}
# Render the "License" sidebar section from the DESCRIPTION License field,
# with known license names turned into links by autolink_license().
data_home_sidebar_license <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  license_html <- autolink_license(pkg$desc$get("License")[[1]])
  paste0("<h2>License</h2>\n", "<p>", license_html, "</p>\n")
}
# Render the "Links" sidebar section: repository download link, GitHub
# source link, bug tracker link, plus any user-configured extra links.
data_home_sidebar_links <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  bullets <- c(
    data_link_repo(pkg),
    data_link_github(pkg),
    data_link_bug_report(pkg),
    data_link_meta(pkg)
  )
  list_with_heading(bullets, "Links")
}
# Wrap a character vector of HTML bullets in an <h2> heading plus an
# unstyled <ul>. Returns character(0) (no section at all) when there are
# no bullets, so callers can drop empty sections cheaply.
list_with_heading <- function(bullets, heading) {
  if (length(bullets) == 0) {
    return(character())
  }
  items <- paste0("<li>", bullets, "</li>\n", collapse = "")
  paste0("<h2>", heading, "</h2>", "<ul class='list-unstyled'>\n", items, "</ul>\n")
}
# Extra sidebar links supplied by the user under `home: links:` in the YAML
# config. Each entry is a list with `text` and `href` fields; transpose()
# regroups the entries field-wise so pmap_chr() calls
# link_url(text = ..., href = ...) once per entry.
# Returns character(0) when no links are configured.
data_link_meta <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  links <- pkg$meta$home$links
  if (length(links) == 0)
    return(character())
  links %>%
    purrr::transpose() %>%
    purrr::pmap_chr(link_url)
}
# Produce a "Browse source code" link to the first github.com URL listed in
# the DESCRIPTION URL field (which may hold several comma-separated URLs).
# Returns character(0) when no GitHub URL is present.
data_link_github <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  urls <- strsplit(pkg$desc$get("URL"), ",\\s+")[[1]]
  is_github <- grepl("github\\.com", urls)
  if (!any(is_github)) {
    return(character())
  }
  link_url("Browse source code", urls[is_github][[1]])
}
# Produce a "Report a bug" link from the DESCRIPTION BugReports field.
# Returns character(0) when the field is absent (desc returns NA then).
data_link_bug_report <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  report_url <- pkg$desc$get("BugReports")[[1]]
  if (is.na(report_url)) {
    return(character())
  }
  link_url("Report a bug", report_url)
}
# Produce a "Download from CRAN/BIOC" link, depending on which repository
# repo_url() finds the package in. Returns an empty list when the package
# is in neither repository.
data_link_repo <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  pkg_name <- pkg$desc$get("Package")[[1]]
  repo <- repo_url(pkg_name)
  if (is.null(repo)) {
    return(list())
  }
  repo_type <- names(repo)
  download_page <- switch(repo_type,
    CRAN = paste0("https://cran.r-project.org/package=", pkg_name),
    BIOC = paste0("https://www.bioconductor.org/packages/", pkg_name),
    stop("Package link not supported")
  )
  link_url(paste0("Download from ", repo_type), download_page)
}
# Return the configured CRAN mirror, falling back to the RStudio mirror when
# none is set. getOption("repos") may be a named character vector; as.list()
# makes the [[ ]] lookup return NULL (rather than error) when CRAN is absent.
cran_mirror <- function() {
  cran <- as.list(getOption("repos"))[["CRAN"]]
  unset <- is.null(cran) || identical(cran, "@CRAN@")
  if (unset) "https://cran.rstudio.com" else cran
}
# Return the Bioconductor software repository URL. Prefer the mirror
# configured through BiocInstaller when that package is installed; otherwise
# fall back to the public Bioconductor release repository.
bioc_mirror <- function() {
  if (requireNamespace("BiocInstaller", quietly = TRUE)) {
    BiocInstaller::biocinstallRepos()[["BioCsoft"]]
  } else {
    "https://bioconductor.org/packages/release/bioc"
  }
}
# Locate `pkg` on CRAN or Bioconductor. Returns a named character vector
# c(CRAN = <url>) or c(BIOC = <url>) pointing at the package's landing page,
# or NULL when the package is in neither repository.
# The CRAN index is checked first and the (separate) Bioconductor index is
# only downloaded on a CRAN miss; the original fetched both indexes
# unconditionally, costing an extra network round trip per call.
repo_url <- function(pkg, cran = cran_mirror(), bioc = bioc_mirror()) {
  cran_pkgs <- utils::available.packages(contriburl = paste0(cran, "/src/contrib"))
  if (pkg %in% rownames(cran_pkgs)) {
    return(c(CRAN = paste0(cran, "/web/packages/", pkg, "/index.html")))
  }
  bioc_pkgs <- utils::available.packages(contriburl = paste0(bioc, "/src/contrib"))
  if (pkg %in% rownames(bioc_pkgs)) {
    return(c(BIOC = paste0(bioc, "/html/", pkg, ".html")))
  }
  NULL
}
# Render a labelled sidebar link. The gsub() inserts a zero-width space
# (U+200B, invisible in most editors -- do not "clean up" this string) after
# each run of slashes in the displayed label, so browsers can break long
# URLs across lines; the href itself is left untouched.
link_url <- function(text, href) {
  label <- gsub("(/+)", "\\1​", href)
  paste0(text, " at <br /><a href='", href, "'>", label, "</a>")
}
# Turn <doi:...>, <arXiv:...> and bare <http...>/<ftp...> spans in
# DESCRIPTION text into HTML anchors. escape_html() runs first, so the
# patterns below operate on the escaped text; [^&]+ deliberately stops at
# "&" (escaped entities), so identifiers containing those characters are
# left unlinked rather than mangled.
linkify <- function(text) {
  text <- escape_html(text)
  text <- gsub("<doi:([^&]+)>", # DOIs with < > & are not supported
              "<<a href='https://doi.org/\\1'>doi:\\1</a>>",
              text, ignore.case = TRUE)
  text <- gsub("<arXiv:([^&]+)>",
              "<<a href='https://arxiv.org/abs/\\1'>arXiv:\\1</a>>",
              text, ignore.case = TRUE)
  text <- gsub("<((http|ftp)[^&]+)>", # URIs with & are not supported
              "<<a href='\\1'>\\1</a>>",
              text)
  text
}
| /R/build-home.R | no_license | romainfrancois/pkgdown | R | false | false | 8,051 | r | #' Build home page
#'
#' First looks for `index.Rmd` or `README.Rmd`, then
#' `index.md` or `README.md`. If none are found, falls back to the
#' description field in `DESCRIPTION`.
#'
#' @section YAML config:
#' To tweak the home page, you need a section called `home`.
#'
#' The sidebar links are automatically generated by inspecting the
#' `URL` and `BugReports` fields of the `DESCRIPTION`.
#' You can add additional links with a subsection called `links`,
#' which should contain a list of `text` + `href` elements:
#'
#' \preformatted{
#' home:
#' links:
#' - text: Link text
#' href: http://website.com
#' }
#'
#' The "developers" list is populated by the maintainer ("cre"), authors
#' ("aut"), and funder ("fnd").
#'
#' @section Badges:
#' Status badges are displayed in the sidebar under the section "Dev status".
#' This section is automatically populated if the first paragraph of the
#' homepage consists solely of status badges as linked images.
#'
#' @inheritParams build_articles
#' @param preview If `TRUE`, will preview freshly generated home page
#' @export
build_home <- function(pkg = ".", path = "docs", depth = 0L, encoding = "UTF-8",
preview = TRUE) {
rstudio_save_all()
old <- set_pkgdown_env("true")
on.exit(set_pkgdown_env(old))
pkg <- as_pkgdown(pkg)
path <- rel_path(path, pkg$path)
data <- data_home(pkg)
data$opengraph <- list(description = pkg$desc$get("Description")[[1]])
rule("Building home")
scoped_package_context(pkg$package, pkg$topic_index, pkg$article_index)
scoped_file_context(depth = depth)
# Copy license file, if present
license_path <- file.path(pkg$path, "LICENSE")
if (file.exists(license_path)) {
render_page(pkg, "license",
data = list(
pagetitle = "License",
license = read_file(license_path)
),
path = file.path(path, "LICENSE.html")
)
}
# Build authors page
if (has_citation(pkg$path)) {
build_citation_authors(pkg, path = path, depth = depth)
} else {
build_authors(pkg, path = path, depth = depth)
}
if (is.null(data$path)) {
data$index <- linkify(pkg$desc$get("Description")[[1]])
render_page(pkg, "home", data, out_path(path, "index.html"), depth = depth)
} else {
file_name <- tools::file_path_sans_ext(basename(data$path))
file_ext <- tools::file_ext(data$path)
if (file_ext == "md") {
data$index <- markdown(path = data$path, depth = 0L)
render_page(pkg, "home", data, out_path(path, "index.html"), depth = depth)
} else if (file_ext == "Rmd") {
if (identical(file_name, "README")) {
# Render once so that .md is up to date
cat_line("Updating ", file_name, ".md")
callr::r_safe(
function(input, encoding) {
rmarkdown::render(
input,
output_format = "github_document",
output_options = list(html_preview = FALSE),
quiet = TRUE,
encoding = encoding,
envir = globalenv()
)
},
args = list(
input = data$path,
encoding = encoding
)
)
}
input <- file.path(path, basename(data$path))
file.copy(data$path, input)
on.exit(unlink(input))
render_rmd(pkg, input, "index.html",
depth = depth,
data = data,
toc = FALSE,
strip_header = TRUE,
encoding = encoding
)
}
}
update_homepage_html(
out_path(path, "index.html"),
isTRUE(pkg$meta$home$strip_header)
)
if (preview) {
utils::browseURL(file.path(path, "index.html"))
}
invisible()
}
update_homepage_html <- function(path, strip_header = FALSE) {
html <- xml2::read_html(path, encoding = "UTF-8")
tweak_homepage_html(html, strip_header = strip_header)
xml2::write_html(html, path, format = FALSE)
path
}
data_home <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
path <- find_first_existing(pkg$path,
c("index.Rmd", "README.Rmd", "index.md", "README.md")
)
print_yaml(list(
pagetitle = pkg$desc$get("Title")[[1]],
sidebar = data_home_sidebar(pkg),
path = path
))
}
data_home_sidebar <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
if (!is.null(pkg$meta$home$sidebar))
return(pkg$meta$home$sidebar)
paste0(
data_home_sidebar_links(pkg),
data_home_sidebar_license(pkg),
data_home_sidebar_citation(pkg),
data_home_sidebar_authors(pkg),
collapse = "\n"
)
}
data_home_sidebar_license <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
paste0(
"<h2>License</h2>\n",
"<p>", autolink_license(pkg$desc$get("License")[[1]]), "</p>\n"
)
}
data_home_sidebar_links <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
links <- c(
data_link_repo(pkg),
data_link_github(pkg),
data_link_bug_report(pkg),
data_link_meta(pkg)
)
list_with_heading(links, "Links")
}
list_with_heading <- function(bullets, heading) {
if (length(bullets) == 0)
return(character())
paste0(
"<h2>", heading, "</h2>",
"<ul class='list-unstyled'>\n",
paste0("<li>", bullets, "</li>\n", collapse = ""),
"</ul>\n"
)
}
data_link_meta <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
links <- pkg$meta$home$links
if (length(links) == 0)
return(character())
links %>%
purrr::transpose() %>%
purrr::pmap_chr(link_url)
}
data_link_github <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
urls <- pkg$desc$get("URL") %>%
strsplit(",\\s+") %>%
`[[`(1)
github <- grepl("github\\.com", urls)
if (!any(github))
return(character())
link_url("Browse source code", urls[which(github)[[1]]])
}
data_link_bug_report <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
bug_reports <- pkg$desc$get("BugReports")[[1]]
if (is.na(bug_reports))
return(character())
link_url("Report a bug", bug_reports)
}
data_link_repo <- function(pkg = ".") {
pkg <- as_pkgdown(pkg)
name <- pkg$desc$get("Package")[[1]]
repo_result <- repo_url(name)
if (is.null(repo_result))
return(list())
if (names(repo_result) == "CRAN")
repo_link <- paste0("https://cran.r-project.org/package=", name)
else if (names(repo_result) == "BIOC")
repo_link <- paste0("https://www.bioconductor.org/packages/", name)
else
stop("Package link not supported")
link_url(
paste0("Download from ", names(repo_result)),
repo_link
)
}
cran_mirror <- function() {
cran <- as.list(getOption("repos"))[["CRAN"]]
if (is.null(cran) || identical(cran, "@CRAN@")) {
"https://cran.rstudio.com"
} else {
cran
}
}
bioc_mirror <- function() {
if (requireNamespace("BiocInstaller", quietly = TRUE)) {
bioc <- BiocInstaller::biocinstallRepos()[["BioCsoft"]]
} else {
bioc <- "https://bioconductor.org/packages/release/bioc"
}
bioc
}
repo_url <- function(pkg, cran = cran_mirror(), bioc = bioc_mirror()) {
bioc_pkgs <- utils::available.packages(contriburl = paste0(bioc, "/src/contrib"))
cran_pkgs <- utils::available.packages(contriburl = paste0(cran, "/src/contrib"))
avail <- if (pkg %in% rownames(cran_pkgs)) {
c(CRAN = paste0(cran, "/web/packages/", pkg, "/index.html"))
} else if (pkg %in% rownames(bioc_pkgs)) {
c(BIOC = paste0(bioc, "/html/", pkg, ".html"))
} else { NULL }
return(avail)
}
link_url <- function(text, href) {
label <- gsub("(/+)", "\\1​", href)
paste0(text, " at <br /><a href='", href, "'>", label, "</a>")
}
linkify <- function(text) {
text <- escape_html(text)
text <- gsub("<doi:([^&]+)>", # DOIs with < > & are not supported
"<<a href='https://doi.org/\\1'>doi:\\1</a>>",
text, ignore.case = TRUE)
text <- gsub("<arXiv:([^&]+)>",
"<<a href='https://arxiv.org/abs/\\1'>arXiv:\\1</a>>",
text, ignore.case = TRUE)
text <- gsub("<((http|ftp)[^&]+)>", # URIs with & are not supported
"<<a href='\\1'>\\1</a>>",
text)
text
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amGauge.R
\name{amSolidGauge}
\alias{amSolidGauge}
\title{Plotting solid gauge using rAmCharts}
\usage{
amSolidGauge(
x,
min = 0,
max = 100,
type = "full",
width = 20,
color = "",
text = "",
textSize = 20,
...
)
}
\arguments{
\item{x}{\code{numeric}, value for which the angular gauge is desired.}
\item{min}{\code{numeric}, minimal possible value.}
\item{max}{\code{numeric}, maximal possible value.}
\item{type}{\code{character}, type of gauge : "full" or "semi".}
\item{width}{\code{numeric}, width of the gauge.}
\item{color}{\code{character}, hexadecimal color value or a vector of colors.}
\item{text}{\code{character}, text.}
\item{textSize}{\code{numeric}, text size.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amSolidGauge computes a gauge of the given value.
}
\examples{
\dontrun{
amSolidGauge(x = 65)
# Other examples available which can be time consuming depending on your configuration.
if (requireNamespace("pipeR", quietly = TRUE)) {
require(pipeR)
# Change min and max values
amSolidGauge(x = 65, min = 0, max = 200)
# Semi solid gauge
amSolidGauge(x = 65, type = "semi")
# Change width
amSolidGauge(x = 65, width = 50)
# Change color
amSolidGauge(x = 65, color = "#2F4F4F")
# Put a color scale
amSolidGauge(x = 10, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 35, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 70, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 90, color = c("#00ff00", "#ffd700", "#ff0000"))
# Add some text to the printed value
amSolidGauge(x = 65, text = "Pct")
# Modify textSize value
amSolidGauge(x = 65, text = "Pct", textSize = 50)
}
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
| /man/amSolidGauge.Rd | no_license | cran/rAmCharts | R | false | true | 2,353 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amGauge.R
\name{amSolidGauge}
\alias{amSolidGauge}
\title{Plotting solid gauge using rAmCharts}
\usage{
amSolidGauge(
x,
min = 0,
max = 100,
type = "full",
width = 20,
color = "",
text = "",
textSize = 20,
...
)
}
\arguments{
\item{x}{\code{numeric}, value for which the angular gauge is desired.}
\item{min}{\code{numeric}, minimal possible value.}
\item{max}{\code{numeric}, maximal possible value.}
\item{type}{\code{character}, type of gauge : "full" or "semi".}
\item{width}{\code{numeric}, width of the gauge.}
\item{color}{\code{character}, hexadecimal color value or a vector of colors.}
\item{text}{\code{character}, text.}
\item{textSize}{\code{numeric}, text size.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amSolidGauge computes a gauge of the given value.
}
\examples{
\dontrun{
amSolidGauge(x = 65)
# Other examples available which can be time consuming depending on your configuration.
if (requireNamespace("pipeR", quietly = TRUE)) {
require(pipeR)
# Change min and max values
amSolidGauge(x = 65, min = 0, max = 200)
# Semi solid gauge
amSolidGauge(x = 65, type = "semi")
# Change width
amSolidGauge(x = 65, width = 50)
# Change color
amSolidGauge(x = 65, color = "#2F4F4F")
# Put a color scale
amSolidGauge(x = 10, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 35, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 70, color = c("#00ff00", "#ffd700", "#ff0000"))
amSolidGauge(x = 90, color = c("#00ff00", "#ffd700", "#ff0000"))
# Add some text to the printed value
amSolidGauge(x = 65, text = "Pct")
# Modify textSize value
amSolidGauge(x = 65, text = "Pct", textSize = 50)
}
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
|
runscript <- function(x, method=c('run', 'copy', 'view', 'show', 'dir'),
                      ask = TRUE, fmt="ch%02d.R", package="FinTS",
                      subdir="scripts", lib.loc=NULL){
  ##
  ## Run, copy, view, show, or locate one of the script files shipped in
  ## `subdir` of `package`. If `x` is missing the user picks a script
  ## interactively; otherwise `x` is a chapter number formatted via `fmt`.
  ## Returns the full path to the script (invisibly except for method='dir').
  ##
  ## 1. Set up
  ##
  method <- match.arg(method)
  s <- system.file(subdir, package = package, lib.loc=lib.loc)
  if(missing(x)){
    ## Interactive selection: list the files in `s` (directories excluded)
    ## and label each with its first line to help the user choose.
    Ch0 <- dir(s, full.names=TRUE)
    Ch.info <- file.info(Ch0)
    Chs <- Ch0[!Ch.info$isdir]
    chs <- dir(s)[!Ch.info$isdir]
    ns <- length(chs)
    if(ns<1){
      cat("no files found in directory", s, "\n")
      return()
    }
    firstLine <- chs
    for(i in seq_len(ns)){
      fL <- try(readLines(Chs[i], 1))
      ## inherits() is the robust test for try() failures (the condition
      ## object has more than one class, so `class(x) != ...` was fragile)
      if(!inherits(fL, "try-error"))
        firstLine[i] <- paste(chs[i], fL, sep=" - ")
    }
    fL. <- (utils::select.list(firstLine)
            == firstLine)
    ch <- chs[fL.]
    Ch <- Chs[fL.]
  }
  else {
    x <- as.numeric(x)
    ch <- sprintf(fmt, x)
    Ch <- paste(s, ch, sep="/")
  }
  ##
  ## 2. method == 'dir': just return the script's full path
  ##
  if(method=='dir')return(Ch)
  ##
  ## 3. method == 'run': source the script, pausing between plots if `ask`
  ##
  if(method=='run'){
    if(ask){
      op <- graphics::par(ask=TRUE)
      on.exit(graphics::par(op))
    }
    source(Ch, echo = TRUE)
    return(invisible(Ch))
  }
  ##
  ## 4. method == 'view': print the script to the console
  ##
  ch. <- readLines(Ch)
  if(method=='view'){
    cat(ch., sep="\n")
    return(invisible(Ch))
  }
  ##
  ## 4a. method == 'show': open the script in a pager window
  ##
  if(method=='show'){
    file.show(Ch)
    return(invisible(Ch))
  }
  ##
  ## 5. method == 'copy': write the script into the working directory
  ##
  writeLines(ch., ch)
  return(invisible(Ch))
}
| /R/runscript.R | no_license | cran/FinTS | R | false | false | 1,768 | r | runscript <- function(x, method=c('run', 'copy', 'view', 'show', 'dir'),
ask = TRUE, fmt="ch%02d.R", package="FinTS",
subdir="scripts", lib.loc=NULL){
##
## 1. Set up
##
method <- match.arg(method)
# chnames <- c(...) # chapter names
# if (missing(x)) x <- match(select.list(chnames), chnames)
# s <- system.file("scripts", package = "FinTS")
s <- system.file(subdir, package = package, lib.loc=lib.loc)
{
if(missing(x)){
Ch0 <- dir(s, full.names=TRUE)
Ch.info <- file.info(Ch0)
Chs <- Ch0[!Ch.info$isdir]
chs <- dir(s)[!Ch.info$isdir]
ns <- length(chs)
if(ns<1){
cat("no files found in directory", s, "\n")
return()
}
firstLine <- chs
for(i in seq(1, length=ns)){
fL <- try(readLines(Chs[i], 1))
if(class(fL) != "try-error")
firstLine[i] <- paste(chs[i], fL, sep=" - ")
}
fL. <- (utils::select.list(firstLine)
== firstLine)
ch <- chs[fL.]
Ch <- Chs[fL.]
}
else {
# ch <- sprintf("ch%02d.R", x)
x <- as.numeric(x)
ch <- sprintf(fmt, x)
Ch <- paste(s, ch, sep="/")
}
}
##
## 2. method == 'dir'
##
if(method=='dir')return(Ch)
##
## 3. method == 'run'
##
if(method=='run'){
if(ask){
op <- graphics::par(ask=TRUE)
on.exit(graphics::par(op))
}
source(Ch, echo = TRUE)
return(invisible(Ch))
}
##
## 4. method == 'view'
##
ch. <- readLines(Ch)
#
if(method=='view'){
cat(ch., sep="\n")
return(invisible(Ch))
}
##
## 4a. method == 'show'
##
if(method=='show'){
file.show(Ch)
return(invisible(Ch))
}
##
## 5. method == 'copy'
##
writeLines(ch., ch)
return(invisible(Ch))
}
|
# GET `path` from the GitHub API on `host` and return the parsed JSON body.
# Uses curl when available; otherwise falls back to download() into a
# temporary file. `pat` (a personal access token) is sent as an
# Authorization header when set, which raises the API rate limit.
github_GET <- function(path, ..., host = "api.github.com", pat = github_pat(), use_curl = !is_standalone() && pkg_installed("curl")) {
  url <- build_url(host, path)
  if (isTRUE(use_curl)) {
    h <- curl::new_handle()
    headers <- c(
      if (!is.null(pat)) {
        c("Authorization" = paste0("token ", pat))
      }
    )
    curl::handle_setheaders(h, .list = headers)
    res <- curl::curl_fetch_memory(url, handle = h)
    if (res$status_code >= 300) {
      stop(github_error(res))
    }
    fromJSON(rawToChar(res$content))
  } else {
    tmp <- tempfile()
    # Clean up the download, matching github_commit()/github_DESCRIPTION();
    # the original leaked one temp file per call on this code path.
    on.exit(unlink(tmp), add = TRUE)
    download(tmp, url, auth_token = pat)
    fromJSONFile(tmp)
  }
}
# Return the commit SHA that `ref` points to in username/repo.
# When `current_sha` is supplied it is sent as an ETag (If-None-Match), so a
# 304 "Not Modified" response means the ref is unchanged and `current_sha`
# is returned as-is -- a conditional request that does not count against the
# API rate limit.
github_commit <- function(username, repo, ref = "master",
  host = "api.github.com", pat = github_pat(), use_curl = !is_standalone() && pkg_installed("curl"), current_sha = NULL) {
  url <- build_url(host, "repos", username, repo, "commits", utils::URLencode(ref, reserved = TRUE))
  if (isTRUE(use_curl)) {
    h <- curl::new_handle()
    # The "sha" media type makes the API return the bare SHA string instead
    # of a full JSON commit object.
    headers <- c(
      "Accept" = "application/vnd.github.v3.sha",
      if (!is.null(pat)) {
        c("Authorization" = paste0("token ", pat))
      }
    )
    if (!is.null(current_sha)) {
      headers <- c(headers, "If-None-Match" = paste0('"', current_sha, '"'))
    }
    curl::handle_setheaders(h, .list = headers)
    res <- curl::curl_fetch_memory(url, handle = h)
    if (res$status_code == 304) {
      # Not modified: the ref still points at current_sha
      return(current_sha)
    }
    if (res$status_code >= 300) {
      stop(github_error(res))
    }
    rawToChar(res$content)
  } else {
    # Fallback without curl: fetch the JSON commit object, extract "sha"
    tmp <- tempfile()
    on.exit(unlink(tmp), add = TRUE)
    download(tmp, url, auth_token = pat)
    get_json_field(readLines(tmp, warn = FALSE), "sha")
  }
}
#' Retrieve the GitHub personal access token
#'
#' Reads the token from the `GITHUB_PAT` environment variable and returns
#' `NULL` when it is unset. With `quiet = FALSE`, reports that the token
#' was found.
#'
#' @keywords internal
#' @noRd
github_pat <- function(quiet = TRUE) {
  token <- Sys.getenv("GITHUB_PAT")
  if (identical(token, "")) {
    return(NULL)
  }
  if (!quiet) {
    message("Using github PAT from envvar GITHUB_PAT")
  }
  token
}
# Fetch the DESCRIPTION file of a GitHub repository (optionally inside
# `subdir`) at `ref`, returning its contents as a single string.
github_DESCRIPTION <- function(username, repo, subdir = NULL, ref = "master", host = "api.github.com", ...,
  use_curl = !is_standalone() && pkg_installed("curl"), pat = github_pat()) {
  if (!is.null(subdir)) {
    subdir <- utils::URLencode(subdir)
  }
  url <- build_url(host, "repos", username, repo, "contents", subdir, "DESCRIPTION")
  url <- paste0(url, "?ref=", utils::URLencode(ref))
  if (isTRUE(use_curl)) {
    h <- curl::new_handle()
    # The "raw" media type makes the API return the file body directly,
    # instead of a JSON envelope carrying base64-encoded content.
    headers <- c(
      "Accept" = "application/vnd.github.v3.raw",
      if (!is.null(pat)) {
        c("Authorization" = paste0("token ", pat))
      }
    )
    curl::handle_setheaders(h, .list = headers)
    res <- curl::curl_fetch_memory(url, handle = h)
    if (res$status_code >= 300) {
      stop(github_error(res))
    }
    rawToChar(res$content)
  } else {
    # Without curl: download the JSON envelope and base64-decode its content
    # field by hand. (The original created a second, unused tempfile here.)
    tmp <- tempfile()
    on.exit(unlink(tmp), add = TRUE)
    download(tmp, url, auth_token = pat)
    base64_decode(gsub("\\\\n", "", fromJSONFile(tmp)$content))
  }
}
# Convert a failed curl response from the GitHub API into a classed error
# condition. The message combines the API's own error text with the
# rate-limit headers, since exhausting the (unauthenticated) rate limit is a
# common failure mode worth surfacing.
github_error <- function(res) {
  res_headers <- curl::parse_headers_list(res$headers)
  ratelimit_remaining <- res_headers$`x-ratelimit-remaining`
  # Header value is epoch seconds; .POSIXct() turns it into a UTC timestamp
  ratelimit_reset <- .POSIXct(res_headers$`x-ratelimit-reset`, tz = "UTC")
  error_details <- fromJSON(rawToChar(res$content))$message
  msg <- sprintf(
    "HTTP error %s.
  %s
  Rate limit remaining: %s
  Rate limit reset at: %s",
    res$status_code,
    error_details,
    ratelimit_remaining,
    format(ratelimit_reset, usetz = TRUE)
  )
  structure(list(message = msg, call = NULL), class = c("simpleError", "error", "condition"))
}
#> Error: HTTP error 404.
#> Not Found
#>
#> Rate limit remaining: 4999
#> Rate limit reset at: 2018-10-10 19:43:52 UTC
| /R/github.R | no_license | jimhester/remotes | R | false | false | 3,775 | r |
github_GET <- function(path, ..., host = "api.github.com", pat = github_pat(), use_curl = !is_standalone() && pkg_installed("curl")) {
url <- build_url(host, path)
if (isTRUE(use_curl)) {
h <- curl::new_handle()
headers <- c(
if (!is.null(pat)) {
c("Authorization" = paste0("token ", pat))
}
)
curl::handle_setheaders(h, .list = headers)
res <- curl::curl_fetch_memory(url, handle = h)
if (res$status_code >= 300) {
stop(github_error(res))
}
fromJSON(rawToChar(res$content))
} else {
tmp <- tempfile()
download(tmp, url, auth_token = pat)
fromJSONFile(tmp)
}
}
github_commit <- function(username, repo, ref = "master",
host = "api.github.com", pat = github_pat(), use_curl = !is_standalone() && pkg_installed("curl"), current_sha = NULL) {
url <- build_url(host, "repos", username, repo, "commits", utils::URLencode(ref, reserved = TRUE))
if (isTRUE(use_curl)) {
h <- curl::new_handle()
headers <- c(
"Accept" = "application/vnd.github.v3.sha",
if (!is.null(pat)) {
c("Authorization" = paste0("token ", pat))
}
)
if (!is.null(current_sha)) {
headers <- c(headers, "If-None-Match" = paste0('"', current_sha, '"'))
}
curl::handle_setheaders(h, .list = headers)
res <- curl::curl_fetch_memory(url, handle = h)
if (res$status_code == 304) {
return(current_sha)
}
if (res$status_code >= 300) {
stop(github_error(res))
}
rawToChar(res$content)
} else {
tmp <- tempfile()
on.exit(unlink(tmp), add = TRUE)
download(tmp, url, auth_token = pat)
get_json_field(readLines(tmp, warn = FALSE), "sha")
}
}
#' Retrieve Github personal access token.
#'
#' A github personal access token
#' Looks in env var `GITHUB_PAT`
#'
#' @keywords internal
#' @noRd
github_pat <- function(quiet = TRUE) {
pat <- Sys.getenv('GITHUB_PAT')
if (identical(pat, "")) return(NULL)
if (!quiet) {
message("Using github PAT from envvar GITHUB_PAT")
}
pat
}
github_DESCRIPTION <- function(username, repo, subdir = NULL, ref = "master", host = "api.github.com", ...,
use_curl = !is_standalone() && pkg_installed("curl"), pat = github_pat()) {
if (!is.null(subdir)) {
subdir <- utils::URLencode(subdir)
}
url <- build_url(host, "repos", username, repo, "contents", subdir, "DESCRIPTION")
url <- paste0(url, "?ref=", utils::URLencode(ref))
if (isTRUE(use_curl)) {
h <- curl::new_handle()
headers <- c(
"Accept" = "application/vnd.github.v3.raw",
if (!is.null(pat)) {
c("Authorization" = paste0("token ", pat))
}
)
curl::handle_setheaders(h, .list = headers)
res <- curl::curl_fetch_memory(url, handle = h)
if (res$status_code >= 300) {
stop(github_error(res))
}
rawToChar(res$content)
} else {
tmp <- tempfile()
on.exit(unlink(tmp), add = TRUE)
tmp <- tempfile()
download(tmp, url, auth_token = pat)
base64_decode(gsub("\\\\n", "", fromJSONFile(tmp)$content))
}
}
github_error <- function(res) {
res_headers <- curl::parse_headers_list(res$headers)
ratelimit_remaining <- res_headers$`x-ratelimit-remaining`
ratelimit_reset <- .POSIXct(res_headers$`x-ratelimit-reset`, tz = "UTC")
error_details <- fromJSON(rawToChar(res$content))$message
msg <- sprintf(
"HTTP error %s.
%s
Rate limit remaining: %s
Rate limit reset at: %s",
res$status_code,
error_details,
ratelimit_remaining,
format(ratelimit_reset, usetz = TRUE)
)
structure(list(message = msg, call = NULL), class = c("simpleError", "error", "condition"))
}
#> Error: HTTP error 404.
#> Not Found
#>
#> Rate limit remaining: 4999
#> Rate limit reset at: 2018-10-10 19:43:52 UTC
|
# Read a Newick tree and remove its root with ape::unroot(); the unrooted
# tree is written back out on the following line. (Presumably prepares
# input for codeml, which the surrounding project uses -- confirm.)
library(ape)
testtree <- read.tree("8690_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8690_2_unrooted.txt") | /codeml_files/newick_trees_processed/8690_2/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("8690_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8690_2_unrooted.txt") |
#' Select a geographic region around the distribution of a species
#'
#' This function takes as arguments a SpatialPolygonsDataFrame with a column representing whether or not a species was detected in each polygon. It then selects a subset of the shape focused around the area of detections.
#' @param x SpatialPolygonsDataFrame
#' @param detect Character, name of column in \code{x} that contains detection data. Zero values are assumed to represent no detections.
#' @param expand Positive numeric, determines size of the focal region. For example, 0.5 corresponds to a region that comprises the occupied polygons plus a buffer with a width about 50% as large as the distance from the centroid of occupied polygons to the farthest occupied polygon.
#' @param upper Character, name of column in \code{x} that contains names of "upper" level polygons that contain "lower" level polygons which are at the scale at which detection/non-detection is scored. This argument is used to remove all lower-level polygons that fall in the same upper-level polygon but have no detections among them. For example, if upper-level polygons are states/provinces and lower-level polygons are counties with detections, then this could be used to remove all states/provinces with no counties with detections. Note that \code{minLowerPolysPerUpper} must be specified. A value of \code{NULL} (default) will not remove any polygons regardless of the values of \code{minLowerPolysPerUpper}.
#' @param cullIslands Logical, if \code{TRUE} (default) then remove islands (i.e., counties with no neighbors). Islands are only removed if all islands in an "upper" polygon (i.e., state, province) have no detections.
#' @param minLowerPolysPerUpper Character, only used if \code{upper} is not \code{NULL}. Minimum number of lower-level polygons necessary to be present in an upper-level polygon if none of them have any detections.
#' @return SpatialPolygonsDataFrame.
#' @examples
#' \donttest{
#'
#' library(sp)
#'
#' # polygons of Mexican counties
#' x <- raster::getData('GADM', country='MEX', level=2)
#'
#' # generate detections
#' set.seed(123)
#' x@data$detect <- 0
#' n <- sum(x@data$NAME_1 == 'Aguascalientes')
#' x@data$detect[x@data$NAME_1 == 'Aguascalientes'] <- rpois(n, 7)
#'
#' n <- sum(x@data$NAME_1 == 'Jalisco')
#' x@data$detect[x@data$NAME_1 == 'Jalisco'] <- rpois(n, 4)
#'
#' n <- sum(x@data$NAME_1 == 'Aguascalientes')
#' x@data$detect[x@data$NAME_1 == 'Aguascalientes'] <- rpois(n, 1)
#'
#' cols <- x@data$detect / max(x@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(x, col=cols, border='gray90')
#'
#' # generate focal region around detections
#' focus1 <- getGeogFocus(x, detect='detect', expand=0.1)
#' focus2 <- getGeogFocus(x, detect='detect', expand=0.5)
#' focus3 <- getGeogFocus(x, detect='detect', expand=0.1, upper='NAME_1')
#' focus4 <- getGeogFocus(x, detect='detect', expand=0.5, upper='NAME_1')
#'
#' # plot
#' par(mfrow=c(2, 2))
#'
#' cols <- focus1@data$detect / max(focus1@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus1, col=cols, border='gray90', main='expand=0.1')
#'
#' cols <- focus2@data$detect / max(focus2@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus2, col=cols, border='gray90', main='expand=0.5')
#'
#' cols <- focus3@data$detect / max(focus3@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus3, col=cols, border='gray90', main='expand=0.1 | sans sparse')
#'
#' cols <- focus4@data$detect / max(focus4@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus4, col=cols, border='gray90', main='expand=0.5 | sans sparse')
#'
#'
#' }
#' @export
getGeogFocus <- function(
  x,
  detect,
  expand = 0.3,
  upper = NULL,
  cullIslands = TRUE,
  minLowerPolysPerUpper = 10
) {

  ## work in an equal-area projection so buffer widths are in metres
  mollweide <- '+proj=moll +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
  origProj <- raster::projection(x)
  xSpEa <- sp::spTransform(x, sp::CRS(mollweide))

  ## minimum convex polygon around the polygons with detections
  speciesIndex <- which(xSpEa@data[ , detect] > 0)
  occs2SpEa <- xSpEa[speciesIndex, ]
  mcpSpEa <- enmSdm::mcpFromPolygons(occs2SpEa)

  ## focal area = union of the MCP and the occupied polygons, buffered by a
  ## fraction (`expand`) of the farthest distance from its centroid
  occsDissolveSpEa <- rgeos::gUnaryUnion(occs2SpEa)
  occsPlusMcpSpEa <- rgeos::gUnion(occsDissolveSpEa, mcpSpEa)
  centSpEa <- rgeos::gCentroid(occsPlusMcpSpEa)
  distFromCent_m <- rgeos::gDistance(occsPlusMcpSpEa, centSpEa, hausdorff=TRUE)
  focalAreaSpEa <- rgeos::gBuffer(occsPlusMcpSpEa, width=expand * distFromCent_m)

  ## select the polygons that intersect the focal area
  ## BUG FIX: the sp::over() call was duplicated, and an unused WGS84
  ## re-projection of the focal area was computed; both removed
  xSpEa$id <- seq_len(nrow(xSpEa))
  focalIndices <- sp::over(focalAreaSpEa, xSpEa, returnList=TRUE)
  focusSpEa <- x[focalIndices[[1]]$id, ]

  ## remove islands (lower polygons with no neighbors) with no detections
  if (cullIslands) {

    neighs <- spdep::poly2nb(focusSpEa, queen=TRUE)
    ## spdep encodes "no neighbors" as the single integer 0
    ## BUG FIX: the original predicate was length(nb == 1) && (nb == 0),
    ## which tested the length of a logical vector, not the neighbor count
    islandIndex <- which(sapply(neighs, function(nb) length(nb) == 1 && nb[1] == 0))

    bads <- integer()
    if (length(islandIndex) > 0) {
      if (!is.null(upper)) {
        ## remove all islands in an upper-level polygon only if none of
        ## them has a detection
        ## BUG FIX: indices in islandIndex refer to rows of focusSpEa, so
        ## look up the upper-level unit in focusSpEa@data, not x@data
        islandUppers <- unique(focusSpEa@data[islandIndex, upper])
        for (islandUpper in islandUppers) {
          lowersInUppers <- which(focusSpEa@data[ , upper] == islandUpper)
          lowersAreIslands <- intersect(lowersInUppers, islandIndex)
          if (all(focusSpEa@data[lowersAreIslands, detect] == 0)) bads <- c(bads, lowersAreIslands)
        }
      } else {
        ## no upper-level grouping: drop any island without a detection
        bads <- islandIndex[focusSpEa@data[islandIndex, detect] == 0]
      }
    }

    if (length(bads) > 0) focusSpEa <- focusSpEa[-bads, ]

  }

  ## remove upper-level units with no detections and too few lower polygons
  if (!is.null(upper)) {

    uppers <- unique(focusSpEa@data[ , upper])
    for (thisUpper in uppers) {
      focusUpper <- focusSpEa[focusSpEa@data[ , upper] == thisUpper, ]
      if (all(focusUpper@data[ , detect] == 0) & nrow(focusUpper) < minLowerPolysPerUpper) {
        focusSpEa <- focusSpEa[-which(focusSpEa@data[ , upper] == thisUpper), ]
      }
    }

  }

  ## return in the input's original projection
  focusSp <- sp::spTransform(focusSpEa, sp::CRS(origProj))
  focusSp

}
| /code/getGeogFocus.r | permissive | adamlilith/tropicosMassModeling | R | false | false | 6,072 | r | #' Select a geographic region around the distribution of a species
#'
#' This function takes as arguments a SpatialPolygonsDataFrame with a column representing whether or not a species was detected in each polygon. It then selects a subset of the shape focused around the area of detections.
#' @param x SpatialPolygonsDataFrame
#' @param detect Character, name of column in \code{x} that contains detection data. Zero values are assumed to represent no detections.
#' @param expand Positive numeric, determines size of the focal region. For example, 0.5 corresponds to a region that comprises the occupied polygons plus a buffer with a width about 50% as large as the distance from the centroid of occupied polygons to the farthest occupied polygon.
#' @param upper Character, name of column in \code{x} that contains names of "upper" level polygons that contain "lower" level polygons which are at the scale at which detection/non-detection is scored. This argument is used to remove all lower-level polygons that fall in the same upper-level polygon but have no detections among them. For example, if upper-level polygons are states/provinces and lower-level polygons are counties with detections, then this could be used to remove all states/provinces with no counties with detections. Note that \code{minLowerPolysPerUpper} must be specified. A value of \code{NULL} (default) will not remove any polygons regardless of the values of \code{minLowerPolysPerUpper}.
#' @param cullIslands Logical, if \code{TRUE} (default) then remove islands (i.e., counties with no neighbors). Islands are only removed if all islands in an "upper" polygon (i.e., state, province) have no detections.
#' @param minLowerPolysPerUpper Character, only used if \code{upper} is not \code{NULL}. Minimum number of lower-level polygons necessary to be present in an upper-level polygon if none of them have any detections.
#' @return SpatialPolygonsDataFrame.
#' @examples
#' \donttest{
#'
#' library(sp)
#'
#' # polygons of Mexican counties
#' x <- raster::getData('GADM', country='MEX', level=2)
#'
#' # generate detections
#' set.seed(123)
#' x@data$detect <- 0
#' n <- sum(x@data$NAME_1 == 'Aguascalientes')
#' x@data$detect[x@data$NAME_1 == 'Aguascalientes'] <- rpois(n, 7)
#'
#' n <- sum(x@data$NAME_1 == 'Jalisco')
#' x@data$detect[x@data$NAME_1 == 'Jalisco'] <- rpois(n, 4)
#'
#' n <- sum(x@data$NAME_1 == 'Aguascalientes')
#' x@data$detect[x@data$NAME_1 == 'Aguascalientes'] <- rpois(n, 1)
#'
#' cols <- x@data$detect / max(x@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(x, col=cols, border='gray90')
#'
#' # generate focal region around detections
#' focus1 <- getGeogFocus(x, detect='detect', expand=0.1)
#' focus2 <- getGeogFocus(x, detect='detect', expand=0.5)
#' focus3 <- getGeogFocus(x, detect='detect', expand=0.1, upper='NAME_1')
#' focus4 <- getGeogFocus(x, detect='detect', expand=0.5, upper='NAME_1')
#'
#' # plot
#' par(mfrow=c(2, 2))
#'
#' cols <- focus1@data$detect / max(focus1@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus1, col=cols, border='gray90', main='expand=0.1')
#'
#' cols <- focus2@data$detect / max(focus2@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus2, col=cols, border='gray90', main='expand=0.5')
#'
#' cols <- focus3@data$detect / max(focus3@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus3, col=cols, border='gray90', main='expand=0.1 | sans sparse')
#'
#' cols <- focus4@data$detect / max(focus4@data$detect)
#' cols <- scales::alpha('darkred', cols)
#' plot(focus4, col=cols, border='gray90', main='expand=0.5 | sans sparse')
#'
#'
#' }
#' @export
getGeogFocus <- function(
  x,
  detect,
  expand = 0.3,
  upper = NULL,
  cullIslands = TRUE,
  minLowerPolysPerUpper = 10
) {

  ## work in an equal-area projection so buffer widths are in metres
  mollweide <- '+proj=moll +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
  origProj <- raster::projection(x)
  xSpEa <- sp::spTransform(x, sp::CRS(mollweide))

  ## minimum convex polygon around the polygons with detections
  speciesIndex <- which(xSpEa@data[ , detect] > 0)
  occs2SpEa <- xSpEa[speciesIndex, ]
  mcpSpEa <- enmSdm::mcpFromPolygons(occs2SpEa)

  ## focal area = union of the MCP and the occupied polygons, buffered by a
  ## fraction (`expand`) of the farthest distance from its centroid
  occsDissolveSpEa <- rgeos::gUnaryUnion(occs2SpEa)
  occsPlusMcpSpEa <- rgeos::gUnion(occsDissolveSpEa, mcpSpEa)
  centSpEa <- rgeos::gCentroid(occsPlusMcpSpEa)
  distFromCent_m <- rgeos::gDistance(occsPlusMcpSpEa, centSpEa, hausdorff=TRUE)
  focalAreaSpEa <- rgeos::gBuffer(occsPlusMcpSpEa, width=expand * distFromCent_m)

  ## select the polygons that intersect the focal area
  ## BUG FIX: the sp::over() call was duplicated, and an unused WGS84
  ## re-projection of the focal area was computed; both removed
  xSpEa$id <- seq_len(nrow(xSpEa))
  focalIndices <- sp::over(focalAreaSpEa, xSpEa, returnList=TRUE)
  focusSpEa <- x[focalIndices[[1]]$id, ]

  ## remove islands (lower polygons with no neighbors) with no detections
  if (cullIslands) {

    neighs <- spdep::poly2nb(focusSpEa, queen=TRUE)
    ## spdep encodes "no neighbors" as the single integer 0
    ## BUG FIX: the original predicate was length(nb == 1) && (nb == 0),
    ## which tested the length of a logical vector, not the neighbor count
    islandIndex <- which(sapply(neighs, function(nb) length(nb) == 1 && nb[1] == 0))

    bads <- integer()
    if (length(islandIndex) > 0) {
      if (!is.null(upper)) {
        ## remove all islands in an upper-level polygon only if none of
        ## them has a detection
        ## BUG FIX: indices in islandIndex refer to rows of focusSpEa, so
        ## look up the upper-level unit in focusSpEa@data, not x@data
        islandUppers <- unique(focusSpEa@data[islandIndex, upper])
        for (islandUpper in islandUppers) {
          lowersInUppers <- which(focusSpEa@data[ , upper] == islandUpper)
          lowersAreIslands <- intersect(lowersInUppers, islandIndex)
          if (all(focusSpEa@data[lowersAreIslands, detect] == 0)) bads <- c(bads, lowersAreIslands)
        }
      } else {
        ## no upper-level grouping: drop any island without a detection
        bads <- islandIndex[focusSpEa@data[islandIndex, detect] == 0]
      }
    }

    if (length(bads) > 0) focusSpEa <- focusSpEa[-bads, ]

  }

  ## remove upper-level units with no detections and too few lower polygons
  if (!is.null(upper)) {

    uppers <- unique(focusSpEa@data[ , upper])
    for (thisUpper in uppers) {
      focusUpper <- focusSpEa[focusSpEa@data[ , upper] == thisUpper, ]
      if (all(focusUpper@data[ , detect] == 0) & nrow(focusUpper) < minLowerPolysPerUpper) {
        focusSpEa <- focusSpEa[-which(focusSpEa@data[ , upper] == thisUpper), ]
      }
    }

  }

  ## return in the input's original projection
  focusSp <- sp::spTransform(focusSpEa, sp::CRS(origProj))
  focusSp

}
|
#####################################################################
## load required libraries ##########################################
#####################################################################
#install.packages("poLCA")
library(poLCA)
#####################################################################
## set working directory and load data ##############################
#####################################################################
## Methodology Reference: Drew Linzer and Jeffrey Lewis:
## poLCA: An R Package for Polytomous Variable Latent Class Analysis
## (Journal of Statistical Software)
## Accessed online: http://www.sscnet.ucla.edu/polisci/faculty/lewis/pdf/poLCA-JSS-final.pdf
## example based on suggested application by Linzer and Lewis (see reference above)
## Data Reference:
## Agresti, Alan. 2002. Categorical Data Analysis, second edition.
## Hoboken: John Wiley & Sons.
## See also: http://www.ncbi.nlm.nih.gov/pubmed/843571
setwd("~/Documents/Website/carcinoma_lca")
data(carcinoma)
#####################################################################
## function to identify ideal number of clusters ####################
#####################################################################
## Fit latent-class models over a range of class counts and compare fit.
##
## For each value in `cluster_range`, fits a poLCA model to `data` using
## `formula` and records AIC and BIC, then plots both criteria against the
## number of classes. Returns the data frame of fit results.
## NOTE(review): poLCA uses random starting values; callers should set a
## seed for reproducible AIC/BIC values.
lca_cluster_count <- function(data, formula, cluster_range = 2:10,
                              maxiter = 5000){
  fit_results <- cbind.data.frame(nclust = cluster_range,
                                  AIC = rep(NA, length(cluster_range)),
                                  BIC = rep(NA, length(cluster_range)))
  for(i in seq_along(cluster_range)){
    ## run LCA for this candidate class count
    lca_results <- poLCA(formula, data, nclass = cluster_range[i], maxiter = maxiter, verbose = FALSE)
    ## save fit results
    fit_results$AIC[i] <- lca_results$aic
    fit_results$BIC[i] <- lca_results$bic
  }
  ## generate plot: AIC in green, BIC in blue, shared y-range
  plot(x = fit_results$nclust, y = fit_results$AIC, type = "l", col = "dark green",
       ylim = c(min(c(fit_results$AIC, fit_results$BIC)),
                max(c(fit_results$AIC, fit_results$BIC))),
       xlab = "Number of Classes",
       ylab = "AIC/BIC",
       main = "Fit Results by Number of Classes",
       lwd = 2)
  lines(x = fit_results$nclust, y = fit_results$BIC, type = "l", col = "blue", lwd = 2)
  legend("topleft",
         c("AIC", "BIC"),
         lty = 1, lwd = 1.5,
         col = c("dark green", "blue"),
         bg = "gray90")
  ## BUG FIX: the original had print(fit_results) after return(), which was
  ## unreachable dead code
  fit_results
}
#####################################################################
## identify ideal number of clusters ################################
#####################################################################
cluster_count <- lca_cluster_count(data = carcinoma,
formula = cbind(A,B,C,D,E,F,G)~1,
cluster_range = 2:10)
#####################################################################
## identify ideal number of clusters ################################
#####################################################################
set.seed(1202)
lca_carcinoma <- poLCA(data = carcinoma,
formula = cbind(A,B,C,D,E,F,G)~1,
maxiter = 5000,
nclass = 3)
#jpeg(filename = "rater_scores.jpeg", width = 850, height = 400, type = "quartz")
par(mar=c(5.1, 4.1, 4.1, 16), xpd=TRUE)
plot(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 1 , 1])),
ylab = "Cancer 'Rating'",
main = "Cancer 'Ratings' Across Pathologists \n (How certain are pathologists than a given tissue sample reflects the presence of carcinoma?)",
xlab = "Pathologist ID",
type = "l",
ylim = c(0, 1),
lty = 1,
lwd = 2,
xaxt = 'n')
axis(1, at = 1:7, labels = names(lca_carcinoma$probs))
lines(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 2 , 1])),
lty = 2, lwd = 2)
lines(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 3 , 1])),
lty = 3, lwd = 2)
legend("topleft",
lwd = 2, lty = rev(1:3),
legend = c(paste("Class A: \n Likely Cancerous \n(n = ", as.numeric(table(lca_carcinoma$predclass)[1]), " samples)\n", sep = ''),
paste("Class B: \n Unclear Pathology \n(n = ", as.numeric(table(lca_carcinoma$predclass)[2]), " samples)\n", sep = ''),
paste("Class C: \n Unlikely to be Cancerous \n(n = ", as.numeric(table(lca_carcinoma$predclass)[3]), " samples)\n", sep = '')),
inset=c(1.05,0))
#dev.off()
| /carcinoma_lca/lca_carcinoma.R | no_license | seaneff/website | R | false | false | 4,541 | r | #####################################################################
## load required libraries ##########################################
#####################################################################
#install.packages("poLCA")
library(poLCA)
#####################################################################
## set working directory and load data ##############################
#####################################################################
## Methodology Reference: Drew Linzer and Jeffrey Lewis:
## poLCA: An R Package for Polytomous Variable Latent Class Analysis
## (Journal of Statistical Software)
## Accessed online: http://www.sscnet.ucla.edu/polisci/faculty/lewis/pdf/poLCA-JSS-final.pdf
## example based on suggested application by Linzer and Lewis (see reference above)
## Data Reference:
## Agresti, Alan. 2002. Categorical Data Analysis, second edition.
## Hoboken: John Wiley & Sons.
## See also: http://www.ncbi.nlm.nih.gov/pubmed/843571
setwd("~/Documents/Website/carcinoma_lca")
data(carcinoma)
#####################################################################
## function to identify ideal number of clusters ####################
#####################################################################
## Fit latent-class models over a range of class counts and compare fit.
##
## For each value in `cluster_range`, fits a poLCA model to `data` using
## `formula` and records AIC and BIC, then plots both criteria against the
## number of classes. Returns the data frame of fit results.
## NOTE(review): poLCA uses random starting values; callers should set a
## seed for reproducible AIC/BIC values.
lca_cluster_count <- function(data, formula, cluster_range = 2:10,
                              maxiter = 5000){
  fit_results <- cbind.data.frame(nclust = cluster_range,
                                  AIC = rep(NA, length(cluster_range)),
                                  BIC = rep(NA, length(cluster_range)))
  for(i in seq_along(cluster_range)){
    ## run LCA for this candidate class count
    lca_results <- poLCA(formula, data, nclass = cluster_range[i], maxiter = maxiter, verbose = FALSE)
    ## save fit results
    fit_results$AIC[i] <- lca_results$aic
    fit_results$BIC[i] <- lca_results$bic
  }
  ## generate plot: AIC in green, BIC in blue, shared y-range
  plot(x = fit_results$nclust, y = fit_results$AIC, type = "l", col = "dark green",
       ylim = c(min(c(fit_results$AIC, fit_results$BIC)),
                max(c(fit_results$AIC, fit_results$BIC))),
       xlab = "Number of Classes",
       ylab = "AIC/BIC",
       main = "Fit Results by Number of Classes",
       lwd = 2)
  lines(x = fit_results$nclust, y = fit_results$BIC, type = "l", col = "blue", lwd = 2)
  legend("topleft",
         c("AIC", "BIC"),
         lty = 1, lwd = 1.5,
         col = c("dark green", "blue"),
         bg = "gray90")
  ## BUG FIX: the original had print(fit_results) after return(), which was
  ## unreachable dead code
  fit_results
}
#####################################################################
## identify ideal number of clusters ################################
#####################################################################
cluster_count <- lca_cluster_count(data = carcinoma,
formula = cbind(A,B,C,D,E,F,G)~1,
cluster_range = 2:10)
#####################################################################
## identify ideal number of clusters ################################
#####################################################################
set.seed(1202)
lca_carcinoma <- poLCA(data = carcinoma,
formula = cbind(A,B,C,D,E,F,G)~1,
maxiter = 5000,
nclass = 3)
#jpeg(filename = "rater_scores.jpeg", width = 850, height = 400, type = "quartz")
par(mar=c(5.1, 4.1, 4.1, 16), xpd=TRUE)
plot(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 1 , 1])),
ylab = "Cancer 'Rating'",
main = "Cancer 'Ratings' Across Pathologists \n (How certain are pathologists than a given tissue sample reflects the presence of carcinoma?)",
xlab = "Pathologist ID",
type = "l",
ylim = c(0, 1),
lty = 1,
lwd = 2,
xaxt = 'n')
axis(1, at = 1:7, labels = names(lca_carcinoma$probs))
lines(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 2 , 1])),
lty = 2, lwd = 2)
lines(sapply(names(lca_carcinoma$probs),function(x) unlist(lca_carcinoma$probs[[x]][ 3 , 1])),
lty = 3, lwd = 2)
legend("topleft",
lwd = 2, lty = rev(1:3),
legend = c(paste("Class A: \n Likely Cancerous \n(n = ", as.numeric(table(lca_carcinoma$predclass)[1]), " samples)\n", sep = ''),
paste("Class B: \n Unclear Pathology \n(n = ", as.numeric(table(lca_carcinoma$predclass)[2]), " samples)\n", sep = ''),
paste("Class C: \n Unlikely to be Cancerous \n(n = ", as.numeric(table(lca_carcinoma$predclass)[3]), " samples)\n", sep = '')),
inset=c(1.05,0))
#dev.off()
|
library(svglite)
library(readr)
library(Biostrings)
library(Rsamtools)
library(rtracklayer)
library(GenomicFeatures)
library(data.table)
library(stringr)
#load some utility functions
source("/fast_new/work/groups/ag_ohler/dharnet_m/Ribo_Lausanne/functions.R")
#load excel data, parse
stopifnot(c("ere_Peptides", "lncRNA_peptides") == readxl::excel_sheets('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx'))
#read in the data on the repeat associated peptides
ere_table <- readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx',sheet=1)
colnames(ere_table) <- c("MS_identified_peptide", "ID", "seqnames", "start", "end", "repName",
"strand", "class.fam", "unMerged", "FPKM RNA seq OD5PCTRL", "FPKM RNA seq OD5PDAC")
#read in the data on the lincRNA peptides
lncRNA_table <- readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx',sheet=2)%>%
set_colnames(c("MS-identified peptide", "gene_id", "Gene Name",
"Gene Type", "FPKM RNAseq OD5PCTRL", "FPKM RNAseq DAC"))
#import genome as fasta file object
genome <- ('../genomes/hg38.fa'%T>%{stopifnot(file.exists(.)) })%>%FaFile
annofile <- 'annotation/gencode.v22.annotation.gtf'%T>%{stopifnot(file.exists(.)) }
ere_gr <- DT2GR(ere_table,seqinf=seqinfo(genome))
## Import one feature type (e.g. 'gene', 'exon') from a possibly gzipped
## annotation file by grepping tab-delimited lines for the feature column,
## writing them to a temp file, and importing with rtracklayer::import().
##
## NOTE(review): annofile is interpolated into a shell command, so paths
## with spaces or shell metacharacters will break -- confirm callers only
## pass plain paths.
read_compressed_gfile <- function(annofile, annotype, fformat='gtf'){
  stopifnot(file.exists(annofile))
  f <- tempfile()
  ## BUG FIX: clean the temp file up even if import() errors (the original
  ## only removed it on the success path)
  on.exit(unlink(f), add = TRUE)
  catbin <- ifelse(tools::file_ext(annofile) == 'gz', 'zcat', 'cat')
  system(str_interp('${catbin} ${annofile} | grep -e "\t${annotype}\t" > ${f}'))
  import(f, format = fformat)
}
genes<-read_compressed_gfile(annofile,'gene')
start_codons<-read_compressed_gfile(annofile,'start_codon')
transcripts<-read_compressed_gfile(annofile,'transcript')
exons<-read_compressed_gfile(annofile,'exon')
names(exons)<-exons$transcript_id
exons<-makeTxDbFromGRanges(exons)%>%exonsBy(use.names=TRUE)
#now get the relevant genes for our lincRNAs
stopifnot(all(lncRNA_table$gene_id %in% genes$gene_id))
lincgenes <- subset(genes, gene_id %in% lncRNA_table$gene_id)
#let's play with maptToTrnascripts
# testexons <- exons%>%head(10e3)%>%subset(transcript_id!=last(transcript_id))
# # rpls <- genes%>%subset(gene_name%>%str_detect('RPL'))
# rpls19 <- genes%>%subset(gene_name%>%str_detect('RPL'))
# import('bigwigs/star_OD5P_05_uM_DAC_1_Aligned.sortedByCoord.out.bam_Ribo_coverage_plus.bw',which = rpls)%>%{strand(.)<-'+';.} %>%
# subsetByOverlaps(rpls)%>%.$score%>%sum
# import('bigwigs/star_OD5P_05_uM_DAC_1_Aligned.sortedByCoord.out.bam_Ribo_coverage_plus.bw',which = rpls)%>%{strand(.)<-'+';.} %>%
# subsetByOverlaps(rpls19)%>%.$score%>%sum
##########Producing metaprofiles to
allbigwigs<-Sys.glob('bigwigs/*_Ribo_coverage_*.bw')
bigwigpairlist <- allbigwigs%>%
data_frame(file=.)%>%
mutate(base=file%>%basename%>%str_replace('plus|minus',''))%>%
mutate(strand=file%>%basename%>%str_extract('plus|minus'))%>%
mutate(strand = case_when(
strand=='plus'~'+',
strand=='minus'~'-'
)) %>%
arrange(desc(strand))%>%
{split(.,.$base)}%>%{map(.,~split(.$file,.$strand))%>%map(rev)}
stopifnot(bigwigpairlist%>%names%>%n_distinct%>%`>`(3))
stopifnot(bigwigpairlist%>%.[[1]]%>%names%>%`==`(c('+','-')))
startcod_windows<-start_codons%>%
# subset(gene_name%>%str_detect('RPL'))%>%
resize(101,'center')
#TODO this logic can be split up a bit to work on bam files say
## Build a metaprofile of ribosome footprint 5' coverage around start codons.
##
## startcod_windows: GRanges of start-codon windows carrying a
##   `transcript_id` metadata column (resized to 101 nt upstream in the
##   calling script -- see the pos - 50 recentering below).
## bigwigpair: list of two bigwig file paths named '+' and '-', one per strand.
##
## Returns a data frame with one row per position relative to the window
## center, the mean of per-transcript-normalized scores, and the reading
## frame (pos %% 3) as a factor.
get_5p_profiles<- function(startcod_windows,bigwigpair){
stopifnot(c('+','-') %in% names(bigwigpair))
# for(i in ls()) assign(i,get(i),envir=.GlobalEnv)
fp_profile_data <-
# import coverage restricted to the windows, stamp each imported track with
# the strand its bigwig represents, then concatenate both strands
lapply(bigwigpair,FUN=import,which = startcod_windows)%>%
{for(strandi in names(.)) strand(.[[strandi]]) <- strandi;.}%>%
Reduce(f=c,.)%>%
# pair each coverage range with the overlapping window's transcript_id
mergeByOverlaps(startcod_windows[,'transcript_id'])%>%
mergeGR2DT%>%
# pos: coverage position relative to the window start (start.1 is the
# window's start column after the merge)
transmute(tid=transcript_id,pos = start-start.1+1,score = score)%>%
group_by(tid)%>%
# dplyr::filter(sum(score)>20)%>%
# normalize each transcript to sum to 1 so deep transcripts don't dominate
mutate(score = score /sum(score))%>%
# recenter: position 0 is the window midpoint (windows are 101 nt wide)
mutate(pos=pos-50)%>%
# average the normalized signal across transcripts at each position
group_by(pos)%>%
summarize(score=mean(score))%>%
mutate(frame=as.factor(pos %% 3))
return(fp_profile_data)
}
pairnum = 1
fp_profile_data <- get_5p_profiles(startcod_windows[,'transcript_id'],bigwigpairlist[[pairnum]])
rangenames <- 'Start Codon Regions'
ribofilenames <- names(bigwigpairlist)[pairnum]
## Metaprofile bar plot colored by reading frame.
fp_profplot <-
  ggplot(fp_profile_data, aes(fill = frame, x = pos, y = score)) +
  geom_bar(stat = 'identity') +
  ## BUG FIX: coord_cartesian() expects a length-2 range; -50:50 passed 101
  ## values
  coord_cartesian(xlim = c(-50, 50)) +
  ggtitle(str_interp("5' Metaprofile for ranges:\n${rangenames}\nfile:${ribofilenames}")) +
  ## BUG FIX: the '+' before theme_bw() was missing, so the theme was a
  ## silently discarded standalone expression
  theme_bw()
fpproffile <- 'plots/fp_profplot_lorenzRibo_rpls.svg'%T>%{normalizePath(.)%>%message}
svglite(fpproffile);print(fp_profplot);dev.off()
# sitesovergenes <-
# lapply(bigwigpairlist[[1]],import,which = unlist(genes)%>%head(1000))%>%
# {for(strandi in names(.)) strand(.[[strandi]]) <- strandi;.}%>%
# Reduce(f=c,.)%>%
# subset(score>0)
## Summarize the `score` of ranges overlapping each gene.
##
## Returns a vector with one entry per element of `genes`: `myfun` applied
## to the scores of all overlapping `sitesovergenes` ranges (NA for genes
## with no overlap, via the left join).
##
## BUG FIX: the original referenced an undefined `ov`, included a no-op
## arrange(), and had a dangling `.$score` line disconnected from the pipe,
## so the function returned the grouped data frame rather than the scores.
## NOTE(review): the overlap is presumably findOverlaps(genes,
## sitesovergenes) -- confirm against the commented-out call sites.
summarize_ov_score <- function(genes, sitesovergenes, myfun = sum){
  ov <- findOverlaps(genes, sitesovergenes)
  data_frame(gene = queryHits(ov), score = sitesovergenes$score[subjectHits(ov)]) %>%
    left_join(data_frame(gene = seq_along(genes)), .) %>%
    group_by(gene) %>%
    dplyr::summarise(score = myfun(score)) %>%
    .$score
}
# genes$score <- summarize_ov_score(genes,sitesovergenes)
# genes$score%>%table
#############Now individual Plots
#TODO this logic can be split up a bit to work on bam files say
## Get per-position ribosome footprint coverage in transcript coordinates.
##
## exons_tr: GRangesList of exons, one element per transcript (names are
##   transcript IDs).
## bigwigpair: list of two bigwig paths named '+' and '-', one per strand.
##
## Returns a data frame with transcript id (`tid`), 1-based transcript
## position (`pos`), coverage `score`, and reading frame (pos %% 3).
get_riboprofdata <- function(exons_tr, bigwigpair) {
  message(names(bigwigpair))
  stopifnot(c('+', '-') %in% names(bigwigpair))
  ## BUG FIX: removed a debug loop that copied every local into .GlobalEnv,
  ## plus a `seqlevs` line that read `profilegrange` before it was defined
  ## (it only worked if a stale global was left over from that debug loop)
  ## and an unused `shared_seqinfo` computation.

  ## import strand-specific coverage over the exons, stamp each track with
  ## its strand, concatenate, and keep covered positions only
  profilegrange <-
    suppressWarnings(lapply(bigwigpair, import, which = unlist(exons_tr))) %>%
    {for (strandi in names(.)) strand(.[[strandi]]) <- strandi; .} %>%
    Reduce(f = c, .) %>%
    subset(score > 0)

  ## transcript-space seqinfo: one "chromosome" per transcript, length =
  ## summed exon width
  trseqinfo <- Seqinfo(seqnames = names(exons_tr), seqlengths = as.vector(sum(width(exons_tr))))

  ## map the genome-space profile into transcript coordinates, carrying
  ## scores along via xHits
  rle <- suppressWarnings(mapToTranscripts(profilegrange, exons_tr))
  rle$score <- profilegrange$score[rle$xHits]
  seqinfo(rle) <- trseqinfo[seqinfo(rle)@seqnames]
  rle <- coverage(rle, weight = 'score')

  ## per transcript: turn the coverage Rle into a (pos, score) data frame
  ## for nonzero positions, then bind and annotate the reading frame
  rle %>%
    lapply(FUN = . %>%
      {
        pos = which(. != 0)
        data_frame(pos = pos, score = as.vector(.[pos]))
      }
    ) %>%
    bind_rows(.id = 'tid') %>%
    mutate(frame = as.factor(pos %% 3))
}
#pick a set of files and a transcript
mytrans <- transcripts%>%subset(gene_name%>%str_detect('RPL'))%>%sample(1)
pairnum = 1
#get the profile data
riboprofdata <- get_riboprofdata(exons[mytrans$transcript_id],bigwigpairlist[[pairnum]])
#check there's data
stopifnot(riboprofdata$score%>%sum%>%`>`(0))
#now print the plot
rangenames <- mytrans$transcript_name
ribofilenames <- names(bigwigpairlist)[pairnum]
#point to focus on so we can see the frame
spoint <- riboprofdata%>%{.$pos[.$score>median(.$score)][1]}
#
transprofplot <-
riboprofdata %>%
# mutate(score= log10(score+(0.1*(min(score))))) %>%
ggplot(aes(fill=frame,color=frame,x=pos,y=score))+
geom_bar(stat='identity')+
coord_cartesian(xlim=c(spoint-50,spoint+50))+
ggtitle(str_interp("Riboseq Read Profile for:\n${mytrans}\nfile:${ribofilenames}"))+
theme_bw()
lociplotfile <- 'plots/loci_riboprofiles/test.svg'%T>%{normalizePath(.)%>%message}
svglite(lociplotfile);print(transprofplot);dev.off()
readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx')
vcfs <- Sys.glob('ext_data/*.vcf')
#may god forgive me
vcfgrs<-vcfs%>%map(.%>%fread%>%{colnames(.)%<>%str_replace('#','');colnames(.)[1:2]<-c('seqnames','start');.}%>%mutate(width=nchar(ALT),seqnames=paste0('chr',seqnames))%>%DT2GR(FaFile('../genomes/hg19.fa')%>%seqinfo))
# library(biomaRt)
# mart <- useDataset("hsapiens_gene_ensembl", useMart("ensembl"))
# genes <- df$ensembl_gene_id
# symbol <- getBM(filters = "ensembl_gene_id",
# attributes = c("ensembl_gene_id","hgnc_symbol"),
# values = genes,
# mart = mart)
# df <- merge(x = symbol,
# y = df,
# by.x="ensembl_gene_id",
# by.y="ensembl_gene_id")
##______
#this code will lift our vcfs over to the 38 annotation
GRanges(
c('chr1:1-10:+',
'chr1:100-110:+',
'chr1:201-210:+',
'chr1:301-310:+')
)%>%{.$transcript_name='a';.}%>%
{.$exon_='a';.}
#we want to map our ORFs onto the transcript as GRs, with only necessary metadata
#then do a mergeByOverlaps
#then use the position and the alt to modify the protein column
#then map this back to our original data
#Testomg
firstfalse <- which(vcfpullseqs!=vcfrefannoseq)[1]
vcfgrs[[1]][firstfalse]%>%getSeq(x=FaFile('../genomes/hg38.fa'))%>%as.character
vcfgrs[[1]][firstfalse]
orfdt
| /src/annotation_find.R | no_license | zslastman/Ribo_Lausanne | R | false | false | 8,820 | r | library(svglite)
library(readr)
library(Biostrings)
library(Rsamtools)
library(rtracklayer)
library(GenomicFeatures)
library(data.table)
library(stringr)
#load some utility functions
source("/fast_new/work/groups/ag_ohler/dharnet_m/Ribo_Lausanne/functions.R")
#load excel data, parse
stopifnot(c("ere_Peptides", "lncRNA_peptides") == readxl::excel_sheets('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx'))
#read in the data on the repeat associated peptides
ere_table <- readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx',sheet=1)
colnames(ere_table) <- c("MS_identified_peptide", "ID", "seqnames", "start", "end", "repName",
"strand", "class.fam", "unMerged", "FPKM RNA seq OD5PCTRL", "FPKM RNA seq OD5PDAC")
#read in the data on the lincRNA peptides
lncRNA_table <- readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx',sheet=2)%>%
set_colnames(c("MS-identified peptide", "gene_id", "Gene Name",
"Gene Type", "FPKM RNAseq OD5PCTRL", "FPKM RNAseq DAC"))
#import genome as fasta file object
genome <- ('../genomes/hg38.fa'%T>%{stopifnot(file.exists(.)) })%>%FaFile
annofile <- 'annotation/gencode.v22.annotation.gtf'%T>%{stopifnot(file.exists(.)) }
ere_gr <- DT2GR(ere_table,seqinf=seqinfo(genome))
## Import one feature type (e.g. 'gene', 'exon') from a possibly gzipped
## annotation file by grepping tab-delimited lines for the feature column,
## writing them to a temp file, and importing with rtracklayer::import().
##
## NOTE(review): annofile is interpolated into a shell command, so paths
## with spaces or shell metacharacters will break -- confirm callers only
## pass plain paths.
read_compressed_gfile <- function(annofile, annotype, fformat='gtf'){
  stopifnot(file.exists(annofile))
  f <- tempfile()
  ## BUG FIX: clean the temp file up even if import() errors (the original
  ## only removed it on the success path)
  on.exit(unlink(f), add = TRUE)
  catbin <- ifelse(tools::file_ext(annofile) == 'gz', 'zcat', 'cat')
  system(str_interp('${catbin} ${annofile} | grep -e "\t${annotype}\t" > ${f}'))
  import(f, format = fformat)
}
# Load each annotation feature type separately.
# NOTE(review): `annofile` and `lncRNA_table` are defined earlier in the
# script (outside this view) — confirm they exist before sourcing this chunk.
genes<-read_compressed_gfile(annofile,'gene')
start_codons<-read_compressed_gfile(annofile,'start_codon')
transcripts<-read_compressed_gfile(annofile,'transcript')
exons<-read_compressed_gfile(annofile,'exon')
names(exons)<-exons$transcript_id
# Group exons by transcript so each element is one transcript's exon set.
exons<-makeTxDbFromGRanges(exons)%>%exonsBy(use.names=TRUE)
#now get the relevant genes for our lincRNAs
stopifnot(all(lncRNA_table$gene_id %in% genes$gene_id))
lincgenes <- subset(genes, gene_id %in% lncRNA_table$gene_id)
#let's play with mapToTranscripts
# testexons <- exons%>%head(10e3)%>%subset(transcript_id!=last(transcript_id))
# # rpls <- genes%>%subset(gene_name%>%str_detect('RPL'))
# rpls19 <- genes%>%subset(gene_name%>%str_detect('RPL'))
# import('bigwigs/star_OD5P_05_uM_DAC_1_Aligned.sortedByCoord.out.bam_Ribo_coverage_plus.bw',which = rpls)%>%{strand(.)<-'+';.} %>%
# subsetByOverlaps(rpls)%>%.$score%>%sum
# import('bigwigs/star_OD5P_05_uM_DAC_1_Aligned.sortedByCoord.out.bam_Ribo_coverage_plus.bw',which = rpls)%>%{strand(.)<-'+';.} %>%
# subsetByOverlaps(rpls19)%>%.$score%>%sum
##########Producing metaprofiles
# Pair up plus/minus-strand coverage bigwigs by sample: result is a named
# list (one entry per sample base name) of 2-element lists named '+', '-'.
allbigwigs<-Sys.glob('bigwigs/*_Ribo_coverage_*.bw')
bigwigpairlist <- allbigwigs%>%
data_frame(file=.)%>%
mutate(base=file%>%basename%>%str_replace('plus|minus',''))%>%
mutate(strand=file%>%basename%>%str_extract('plus|minus'))%>%
mutate(strand = case_when(
strand=='plus'~'+',
strand=='minus'~'-'
)) %>%
arrange(desc(strand))%>%
{split(.,.$base)}%>%{map(.,~split(.$file,.$strand))%>%map(rev)}
# Sanity checks: more than 3 samples, and each pair is ordered ('+','-').
stopifnot(bigwigpairlist%>%names%>%n_distinct%>%`>`(3))
stopifnot(bigwigpairlist%>%.[[1]]%>%names%>%`==`(c('+','-')))
# 101-nt windows centered on annotated start codons (+/- 50 nt).
startcod_windows<-start_codons%>%
# subset(gene_name%>%str_detect('RPL'))%>%
resize(101,'center')
#TODO this logic can be split up a bit to work on bam files say
# Build a 5' metaprofile around start-codon windows from one stranded
# pair of coverage bigwigs.
#
# startcod_windows: GRanges of fixed-width (101 nt) windows centered on
#   start codons, carrying a 'transcript_id' metadata column.
# bigwigpair: named list of bigwig paths with names '+' and '-'.
# Returns a data frame with one row per offset: pos (-50..50 relative to
# the start codon), the mean per-transcript-normalised score, and the
# reading frame (pos %% 3).
get_5p_profiles<- function(startcod_windows,bigwigpair){
stopifnot(c('+','-') %in% names(bigwigpair))
# for(i in ls()) assign(i,get(i),envir=.GlobalEnv)
fp_profile_data <-
# Import coverage restricted to the windows, stamp each import with the
# strand of the bigwig it came from, then concatenate both strands.
lapply(bigwigpair,FUN=import,which = startcod_windows)%>%
{for(strandi in names(.)) strand(.[[strandi]]) <- strandi;.}%>%
Reduce(f=c,.)%>%
# Pair every coverage range with the overlapping window(s).
mergeByOverlaps(startcod_windows[,'transcript_id'])%>%
mergeGR2DT%>%
# pos: 1-based offset of the coverage range within its window
# (start.1 is the window start after the merge).
transmute(tid=transcript_id,pos = start-start.1+1,score = score)%>%
group_by(tid)%>%
# dplyr::filter(sum(score)>20)%>%
# Normalise each transcript's profile to sum to 1 so deep and shallow
# transcripts contribute equally, then center on the start codon.
mutate(score = score /sum(score))%>%
mutate(pos=pos-50)%>%
group_by(pos)%>%
summarize(score=mean(score))%>%
mutate(frame=as.factor(pos %% 3))
return(fp_profile_data)
}
# Build and save the 5' metaprofile plot for the first bigwig pair.
pairnum = 1
fp_profile_data <- get_5p_profiles(startcod_windows[,'transcript_id'],bigwigpairlist[[pairnum]])
rangenames <- 'Start Codon Regions'
ribofilenames <- names(bigwigpairlist)[pairnum]
fp_profplot <-
ggplot(fp_profile_data,aes(fill=frame,x=pos,y=score))+
geom_bar(stat='identity')+
# BUG FIX: xlim must be a length-2 range; '-50:50' produced a 101-element
# vector, which coord_cartesian rejects.
coord_cartesian(xlim=c(-50,50))+
ggtitle(str_interp("5' Metaprofile for ranges:\n${rangenames}\nfile:${ribofilenames}"))+
# BUG FIX: the original was missing the '+' before theme_bw(), so the
# theme call was a discarded standalone expression and never applied.
theme_bw()
# Save the plot and message() its absolute path for convenience.
fpproffile <- 'plots/fp_profplot_lorenzRibo_rpls.svg'%T>%{normalizePath(.)%>%message}
svglite(fpproffile);print(fp_profplot);dev.off()
# sitesovergenes <-
# lapply(bigwigpairlist[[1]],import,which = unlist(genes)%>%head(1000))%>%
# {for(strandi in names(.)) strand(.[[strandi]]) <- strandi;.}%>%
# Reduce(f=c,.)%>%
# subset(score>0)
# Summarise the 'score' metadata of ranges overlapping each gene.
#
# genes: GRanges of genes (query).
# sitesovergenes: GRanges with a numeric 'score' column (subject).
# myfun: aggregation applied to the scores overlapping one gene (default sum).
# Returns a numeric vector, one aggregated score per input gene, in gene
# order; genes with no overlap get myfun(NA) (NA for sum/mean).
summarize_ov_score <- function(genes,sitesovergenes,myfun=sum){
# BUG FIX: 'ov' was referenced but never computed in the original.
ov <- findOverlaps(genes, sitesovergenes)
data_frame(gene=queryHits(ov),score=sitesovergenes$score[subjectHits(ov)])%>%
# left_join against every gene index so overlap-free genes are retained.
left_join(data_frame(gene=seq_along(genes)),.)%>%
group_by(gene)%>%
dplyr::summarise(score=myfun(score))%>%
# BUG FIX: the original pipe ended at arrange() and a dangling '.$score'
# statement referenced an undefined '.', so no scores were returned.
arrange(gene)%>%
.$score
}
# genes$score <- summarize_ov_score(genes,sitesovergenes)
# genes$score%>%table
#############Now individual Plots
#TODO this logic can be split up a bit to work on bam files say
#TODO this logic can be split up a bit to work on bam files say
# Get per-position ribosome coverage in transcript coordinates for a set of
# transcripts, from one stranded pair of coverage bigwigs.
#
# exons_tr: GRangesList of exons grouped by transcript (names = transcript ids).
# bigwigpair: named list of bigwig paths with names '+' and '-'.
# Returns a data frame with tid (transcript id), pos (1-based transcript
# coordinate with nonzero coverage), score, and frame (pos %% 3).
get_riboprofdata<- function(exons_tr,bigwigpair){
message( names(bigwigpair))
stopifnot(c('+','-') %in% names(bigwigpair))
# NOTE: removed a debug loop that copied every local into .GlobalEnv, and
# an unused 'seqlevs' line that referenced 'profilegrange' before it was
# defined (it only ever worked because of that global-env leak).
# Seqinfo in transcript space: each transcript's length is its summed exon width.
trseqinfo <- Seqinfo(seqnames=names(exons_tr),seqlengths=as.vector(sum(width(exons_tr))))
# Import coverage over the exons, stamp strands, concatenate, keep nonzero.
profilegrange <-
suppressWarnings(lapply(bigwigpair,import,which = unlist(exons_tr)))%>%
{for(strandi in names(.)) strand(.[[strandi]]) <- strandi;.}%>%
Reduce(f=c,.)%>%
subset(score>0)
#now map our profile data into the transcript (exon) coordinate space
rle <- suppressWarnings(mapToTranscripts(profilegrange,exons_tr))
rle$score<-profilegrange$score[rle$xHits];
seqinfo(rle)<-trseqinfo[seqinfo(rle)@seqnames]
rle <- coverage(rle, weight='score')
rle %>%
# One coverage Rle per transcript:
lapply(FUN=.%>%
# turn it into a data frame of nonzero positions
{
pos = which(.!=0)
data_frame(pos=pos, score = as.vector(.[pos]))
}
)%>%
bind_rows(.id='tid')%>%
mutate(frame=as.factor(pos %% 3))
}
#pick a set of files and a transcript (one random RPL ribosomal-protein gene)
mytrans <- transcripts%>%subset(gene_name%>%str_detect('RPL'))%>%sample(1)
pairnum = 1
#get the profile data
riboprofdata <- get_riboprofdata(exons[mytrans$transcript_id],bigwigpairlist[[pairnum]])
#check there's data
stopifnot(riboprofdata$score%>%sum%>%`>`(0))
#now print the plot
rangenames <- mytrans$transcript_name
ribofilenames <- names(bigwigpairlist)[pairnum]
#point to focus on so we can see the frame (first above-median position)
spoint <- riboprofdata%>%{.$pos[.$score>median(.$score)][1]}
#
transprofplot <-
riboprofdata %>%
# mutate(score= log10(score+(0.1*(min(score))))) %>%
ggplot(aes(fill=frame,color=frame,x=pos,y=score))+
geom_bar(stat='identity')+
coord_cartesian(xlim=c(spoint-50,spoint+50))+
# NOTE(review): ${mytrans} interpolates a whole GRanges object into the
# title; ${rangenames} (transcript_name) was probably intended — confirm.
ggtitle(str_interp("Riboseq Read Profile for:\n${mytrans}\nfile:${ribofilenames}"))+
theme_bw()
lociplotfile <- 'plots/loci_riboprofiles/test.svg'%T>%{normalizePath(.)%>%message}
svglite(lociplotfile);print(transprofplot);dev.off()
# NOTE(review): result of read_excel is discarded — presumably exploratory.
readxl::read_excel('ext_data/20180719_OD5P_lncRNA_RE_peptides_combiner.xlsx')
vcfs <- Sys.glob('ext_data/*.vcf')
#may god forgive me
# Parse each VCF as a table, rename '#CHROM'/'POS' to seqnames/start, prefix
# 'chr', and convert to GRanges with hg19 seqinfo (ALT length as width).
vcfgrs<-vcfs%>%map(.%>%fread%>%{colnames(.)%<>%str_replace('#','');colnames(.)[1:2]<-c('seqnames','start');.}%>%mutate(width=nchar(ALT),seqnames=paste0('chr',seqnames))%>%DT2GR(FaFile('../genomes/hg19.fa')%>%seqinfo))
# library(biomaRt)
# mart <- useDataset("hsapiens_gene_ensembl", useMart("ensembl"))
# genes <- df$ensembl_gene_id
# symbol <- getBM(filters = "ensembl_gene_id",
# attributes = c("ensembl_gene_id","hgnc_symbol"),
# values = genes,
# mart = mart)
# df <- merge(x = symbol,
# y = df,
# by.x="ensembl_gene_id",
# by.y="ensembl_gene_id")
##______
#this code will lift our vcfs over to the 38 annotation
# Toy GRanges with dummy metadata columns — appears to be a scratch test of
# metadata assignment; its result is discarded.
GRanges(
c('chr1:1-10:+',
'chr1:100-110:+',
'chr1:201-210:+',
'chr1:301-310:+')
)%>%{.$transcript_name='a';.}%>%
{.$exon_='a';.}
#we want to map our ORFs onto the transcript as GRs, with only necessary metadata
#then do a mergeByOverlaps
#then use the position and the alt to modify the protein column
#then map this back to our original data
#Testing
# NOTE(review): 'vcfpullseqs' and 'vcfrefannoseq' are not defined anywhere in
# this file — these lines will error unless they exist in the session.
firstfalse <- which(vcfpullseqs!=vcfrefannoseq)[1]
vcfgrs[[1]][firstfalse]%>%getSeq(x=FaFile('../genomes/hg38.fa'))%>%as.character
vcfgrs[[1]][firstfalse]
# NOTE(review): 'orfdt' is also undefined here — presumably leftover scratch.
orfdt
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Qskat.R
\name{KAT.cnull}
\alias{KAT.cnull}
\title{Fit a null linear regression model}
\usage{
KAT.cnull(Y, X)
}
\arguments{
\item{Y}{continuous outcome}
\item{X}{covariates to be adjusted; set X=NULL when there are no covariates}
}
\description{
Fit a null linear model to be used for variant set association test
}
\keyword{KAT.cnull}
| /man/KAT.cnull.Rd | no_license | biostatpzeng/mkatr | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Qskat.R
\name{KAT.cnull}
\alias{KAT.cnull}
\title{Fit a null linear regression model}
\usage{
KAT.cnull(Y, X)
}
\arguments{
\item{Y}{continuous outcome}
\item{X}{covariates to be adjusted; set X=NULL when there are no covariates}
}
\description{
Fit a null linear model to be used for variant set association test
}
\keyword{KAT.cnull}
|
# cat_building - Experiment 3b
# write output of analysis to text file?
write.output <- TRUE
# load required packages (character.only is needed because the names are strings)
libs <- c("plyr", "binom", "lme4", "lmerTest", "tidyr","ggplot2")
lapply(libs, library, character.only = TRUE)
# define standard error function
# Standard error of the mean, ignoring missing values.
# var: numeric vector, possibly containing NAs.
# Returns sd/sqrt(n), where n counts only the non-NA observations.
sem <- function(var){
# BUG FIX: na.rm = TRUE keeps the numerator consistent with the
# NA-excluding denominator; the original returned NA whenever var had NAs.
return(sd(var, na.rm = TRUE) / sqrt(length(var[!is.na(var)])))
}
# turn off scientific notation in printed output
options(scipen = 999)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Read in data
# read in data from spontaneous reminding assessment
data.reminding <- read.csv('Experiment 3b reminding data.csv')
# read in data from study tasks
data.study <- read.csv('Experiment 3b study task data.csv')
# First column is the participant number (name may arrive with a BOM prefix).
names(data.study)[1] <- 'pnum'
# # subset only data necessary for present analyses and descriptives
# # # create long form data for encoding analyses + descriptives
# NOTE(review): columns 27 and 52 are assumed to be the final-task scores for
# the two principles — confirm against the CSV header if it changes.
data.study.lf <- gather(data.study[,c(1:3,27,52)], key = 'stim', value = 'final.task.score',-c('pnum','condition','encoding.count.bal'))
data.study.lf$principle <- ifelse(data.study.lf$stim == "paas.description.total", "Problem-as-a-solution","Convergence")
data.study.lf$principle <- as.factor(data.study.lf$principle)
# Average performance columns (54 = problem-as-a-solution, 55 = convergence),
# stacked into a second long-form frame.
data.study.pass <- data.study[,c(1,2,3,54)]
names(data.study.pass)[4] <- 'avgPerf'
data.study.pass$principle <- 'Problem-as-a-solution'
data.study.conv <- data.study[,c(1,2,3,55)]
names(data.study.conv)[4] <- 'avgPerf'
data.study.conv$principle <- 'Convergence'
data.study.lf2 <- rbind(data.study.pass,data.study.conv)
data.study.lf2$principle <- as.factor(data.study.lf2$principle)
# merge reminding data and encoding data (keep all reminding rows)
data.combined <- merge(x = data.reminding, y= data.study.lf[,-c(2:4)],
by= c('pnum', 'principle'), all.x = TRUE, all.y = FALSE)
# Long-form frame of per-case study explanations: 8 cases per participant
# (4 problem-as-a-solution disasters, 4 convergence analogs).
# NOTE(review): explanation columns 7/12/17/22/32/37/42/47 are assumed to map
# to the 8 cases in the order listed below — confirm against the CSV.
study.cases <- data.frame(id = rep(data.study[,1],8),
condition = rep(data.study[,2], 8),
count.bal = rep(data.study[,3], 8),
principle = c(rep('Problem-as-a-solution', nrow(data.study)*4),rep('Convergence', nrow(data.study)*4)),
case = c(rep("avalanche", nrow(data.study)),rep("wildfire", nrow(data.study)),rep("earthquake", nrow(data.study)),rep("solar", nrow(data.study)),
rep("army", nrow(data.study)),rep("tank", nrow(data.study)),rep("wood", nrow(data.study)),rep("oil", nrow(data.study))),
explanation = c(data.study[,7], data.study[,12], data.study[,17], data.study[,22], data.study[,32], data.study[,37], data.study[,42], data.study[,47]))
# Position (1-4) of each study case within its principle's training sequence,
# determined by the counterbalance letter. This replaces a 36-level nested
# ifelse with an explicit lookup: each counterbalance letter maps to the
# ordered disaster cases; the convergence cases (tank, army, oil, wood) share
# the position of their paired disaster case (wildfire, avalanche,
# earthquake, solar respectively). Mapping transcribed 1:1 from the original.
cb.case.order <- list(
a = c("wildfire", "avalanche", "earthquake", "solar"),
b = c("avalanche", "earthquake", "solar", "wildfire"),
c = c("earthquake", "solar", "wildfire", "avalanche"),
d = c("solar", "wildfire", "avalanche", "earthquake"),
e = c("wildfire", "earthquake", "solar", "avalanche"),
f = c("solar", "wildfire", "avalanche", "earthquake"),
g = c("avalanche", "earthquake", "wildfire", "solar"),
i = c("wildfire", "earthquake", "solar", "avalanche"),
k = c("avalanche", "solar", "earthquake", "wildfire")
)
paired.case <- c(wildfire = "wildfire", avalanche = "avalanche",
earthquake = "earthquake", solar = "solar",
tank = "wildfire", army = "avalanche",
oil = "earthquake", wood = "solar")
study.cases$position <- mapply(function(cb, cs) {
# NA counterbalance/case propagates NA, mirroring the original ifelse chain.
if (is.na(cb) || is.na(cs)) return(NA_real_)
ord <- cb.case.order[[as.character(cb)]]
# Unknown counterbalance letters fall through to the original 999 sentinel.
if (is.null(ord)) return(999)
pos <- match(paired.case[[as.character(cs)]], ord)
if (is.na(pos)) 999 else pos
}, study.cases$count.bal, study.cases$case, USE.NAMES = FALSE)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Descriptive Statistics
# # Descriptives for spontaneous remindings to target cues by condition
desc.target.condition <- ddply(subset(data.reminding, principle != "Distractor"),
.(condition), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.target.condition <- cbind(desc.target.condition,
binom.logit(desc.target.condition[,3], desc.target.condition[,4],
conf.level = 0.95)[,c(5,6)])
# 'Combined' marks rows that collapse over principle.
# NOTE(review): rep("Combined",2) assumes exactly two conditions.
desc.target.condition <- cbind(desc.target.condition[,1], rep("Combined",2),desc.target.condition[,-1])
names(desc.target.condition)[c(1,2,6,7)] <- c("condition","principle","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to target cues by principle
desc.target.principle <- ddply(subset(data.reminding, principle != "Distractor"),
.(principle), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.target.principle <- cbind(desc.target.principle,
binom.logit(desc.target.principle[,3], desc.target.principle[,4],
conf.level = 0.95)[,c(5,6)])
desc.target.principle <- cbind(rep("Combined",2),desc.target.principle)
names(desc.target.principle)[c(1,6,7)] <- c("condition","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to target cues by condition and principle
desc.target.cond.princ <- ddply(subset(data.reminding, principle != "Distractor"),
.(condition, principle), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.target.cond.princ <- cbind(desc.target.cond.princ,
binom.logit(desc.target.cond.princ[,4], desc.target.cond.princ[,5],
conf.level = 0.95)[,c(5,6)])
names(desc.target.cond.princ)[6:7] <- c("lower95ci","upper95ci")
# # combine into single data.frame for all target cues
descriptives.targets <- rbind(desc.target.condition,
desc.target.principle,desc.target.cond.princ)
# # # Print descriptives
cat('\n');print("REMINDINGS TO TARGETS BY CONDITION AND PRINCIPLE DESCRIPTIVES")
print(descriptives.targets)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(descriptives.targets, 'E3b reminding to targets by condition and principle descriptives.csv', row.names = FALSE)
}
# # Descriptives for spontaneous remindings to distractors by condition
desc.distract.condition <- ddply(subset(data.reminding, principle == "Distractor"),
.(condition), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.condition <- cbind(desc.distract.condition,
binom.logit(desc.distract.condition[,3], desc.distract.condition[,4],
conf.level = 0.95)[,c(5,6)])
desc.distract.condition <- cbind(desc.distract.condition[1],rep("Combined",2),desc.distract.condition[,-1])
names(desc.distract.condition)[c(2,6,7)] <- c("case","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to distractors by case
desc.distract.case <- ddply(subset(data.reminding, principle == "Distractor"),
.(case), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.case <- cbind(desc.distract.case,
binom.logit(desc.distract.case[,3], desc.distract.case[,4],
conf.level = 0.95)[,c(5,6)])
desc.distract.case <- cbind(rep('Combined',2), desc.distract.case)
names(desc.distract.case)[c(1,6,7)] <- c("condition","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to distractors by condition and case
desc.distract.condcase <- ddply(subset(data.reminding, principle == "Distractor"),
.(condition, case), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.condcase <- cbind(desc.distract.condcase,
binom.logit(desc.distract.condcase[,4], desc.distract.condcase[,5],
conf.level = 0.95)[,c(5,6)])
names(desc.distract.condcase)[6:7] <- c("lower95ci","upper95ci")
descriptives.distractors <- rbind(desc.distract.condition,
desc.distract.case,desc.distract.condcase)
# # # Print descriptives
cat('\n');print("OVERGENERALIZATION TO DISTRACTORS BY CASE AND DESCRIPTIVES")
print(descriptives.distractors)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(descriptives.distractors, 'E3b reminding to distractors by case descriptives.csv', row.names = FALSE)
}
# # Descriptives for spontaneous remindings to distractors by distractor position
desc.distract.pos <- ddply(subset(data.reminding, principle == "Distractor"),
.(position), summarize, spont.reminding = mean(reminding),
frequency = sum(reminding),
n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.pos <- cbind(desc.distract.pos,
binom.logit(desc.distract.pos[,3], desc.distract.pos[,4],
conf.level = 0.95)[,c(5,6)])
names(desc.distract.pos)[5:6] <- c("lower95ci","upper95ci")
# # # Print descriptives
cat('\n');print("OVERGENERALIZATION TO DISTRACTORS BY POSITION IN ASSESSMENT")
print(desc.distract.pos)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(desc.distract.pos, 'E3b reminding to distractors by position descriptives.csv', row.names = FALSE)
}
# # Descriptives for number of cases listed for target cues by condition
desc.num.cases.cond <- ddply(subset(data.reminding, principle != "Distractor"),
.(condition), summarize, avg.cases = mean(mem.listed), sd.cases = sd(mem.listed),
min.cases = min(mem.listed), max.cases = max(mem.listed), n.obs = length(mem.listed))
# # # Print descriptives
cat('\n');print("NUMBER OF RESPONSES PROVIDED TO TARGET CUES BY CONDITION")
print(desc.num.cases.cond)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(desc.num.cases.cond, 'E3b number of responses to target cues by condition and reminding success descriptives.csv', row.names = FALSE)
}
# # Descriptives for type of successful remindings to target cues by condition
# (among successful remindings only: proportion case-based vs principle-based)
desc.remind.type <- ddply(subset(data.reminding, principle != "Distractor" & reminding == 1),
.(condition), summarize, prop.case = mean(cases.listed), n.cases = sum(cases.listed),
prop.principle = mean(set.listed), n.principle = sum(set.listed), n.obs = length(cases.listed))
# 95% CIs for both the case-based and principle-based proportions.
desc.remind.type <- cbind(desc.remind.type,
binom.logit(desc.remind.type[,3], desc.remind.type[,6],
conf.level = 0.95)[,c(5,6)],
binom.logit(desc.remind.type[,5], desc.remind.type[,6],
conf.level = 0.95)[,c(5,6)])
names(desc.remind.type)[7:10] <- c("lower95ci.case","upper95ci.case", "lower95ci.principle","upper95ci.principle")
# # # Print descriptives
cat('\n');print("TYPE OF RESPONSES FOR SUCCESSFUL REMINDINGS TO TARGET CUES BY CONDITION")
print(desc.remind.type)
# # # write descriptives to file
if(write.output == TRUE){
# BUG FIX: output filename said 'E2 ...' while every other output file in
# this Experiment-3b script is prefixed 'E3b' — corrected the prefix.
write.csv(desc.remind.type, 'E3b type of responses for successful remindings descriptives.csv', row.names = FALSE)
}
# # Descriptives for final study task performance by condition
desc.final.study.cond <- ddply(data.study.lf, .(condition), summarize,
mean.score = mean(final.task.score), sem.score = sem(final.task.score),
sd.score = sd(final.task.score))
# 'Combined' marks rows collapsing over principle (assumes two conditions).
desc.final.study.cond <- cbind(desc.final.study.cond[,1], rep("Combined",2),desc.final.study.cond[,-1])
names(desc.final.study.cond)[1:2] <- c('condition','principle')
# # Descriptives for final study task performance by principle
desc.final.study.princ <- ddply(data.study.lf, .(principle), summarize,
mean.score = mean(final.task.score), sem.score = sem(final.task.score),
sd.score = sd(final.task.score))
desc.final.study.princ <- cbind(rep("Combined",2),desc.final.study.princ)
names(desc.final.study.princ)[1] <- 'condition'
# # Descriptives for final study task performance by condition and principle
desc.final.study.cond.princ <- ddply(data.study.lf, .(condition, principle), summarize,
mean.score = mean(final.task.score), sem.score = sem(final.task.score),
sd.score = sd(final.task.score))
# # combine into single data.frame for all study task data
descriptives.final.study <- rbind(desc.final.study.cond,
desc.final.study.princ,desc.final.study.cond.princ)
# # # Print descriptives
cat('\n');print("FINAL STUDY TASK PERFORMANCE")
print(descriptives.final.study)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(descriptives.final.study, 'E3b final study task performance descriptives.csv', row.names = FALSE)
}
# # Descriptives for study task performance by condition
# (first aggregate within participant, then across participants)
desc.study.id.cond <- ddply(study.cases, .(id, condition), summarize,
avg = mean(explanation), sem = sem(explanation),
sd.score = sd(explanation))
desc.study.cond <- ddply(desc.study.id.cond, .(condition), summarize,
mean.score = mean(avg), sem.score = sem(avg),
sd.score = sd(avg))
desc.study.cond <- cbind(desc.study.cond[,1], rep("Combined",2),desc.study.cond[,-1])
names(desc.study.cond)[1:2] <- c('condition','principle')
# # Descriptives for study task performance by principle (collapsing condition)
# BUG FIX: desc.study.princ was referenced in the rbind below but never
# computed anywhere, which made this section error at runtime. Build it here
# analogously to desc.final.study.princ above.
desc.study.id.princ <- ddply(study.cases, .(id, principle), summarize,
avg = mean(explanation), sem.score = sem(explanation),
sd.score = sd(explanation))
desc.study.princ <- ddply(desc.study.id.princ, .(principle), summarize,
mean.score = mean(avg), sem.score = sem(avg),
sd.score = sd(avg))
desc.study.princ <- cbind(rep("Combined",2), desc.study.princ)
names(desc.study.princ)[1] <- 'condition'
# # Descriptives for study task performance by condition and principle
desc.study.id.cond.princ <- ddply(study.cases, .(id,condition, principle), summarize,
avg = mean(explanation), sem.score = sem(explanation),
sd.score = sd(explanation))
desc.study.cond.princ <- ddply(desc.study.id.cond.princ, .(condition, principle), summarize,
mean.score = mean(avg), sem.score = sem(avg),
sd.score = sd(avg))
# # combine into single data.frame for all study task data
descriptives.study <- rbind(desc.study.cond,
desc.study.princ,desc.study.cond.princ)
# # # Print descriptives
# FIX: label said "FINAL STUDY TASK PERFORMANCE", duplicating the previous
# section's header; this section covers all study-task performance.
cat('\n');print("STUDY TASK PERFORMANCE")
print(descriptives.study)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(descriptives.study, 'E3b study task performance descriptives.csv', row.names = FALSE)
}
# # Descriptives for study task performance across training (by position 1-4)
desc.all.study.cond <- ddply(study.cases, .(condition, position), summarize,
score = mean(explanation), sd.score = sd(explanation))
desc.all.study.cond<- cbind(desc.all.study.cond [,1], "Combined",desc.all.study.cond[,-1])
names(desc.all.study.cond)[1:2] <- c('condition','principle')
# # # Category-building by principle
desc.all.study.prince <- ddply(study.cases, .(condition, principle, position), summarize,
score = mean(explanation), sd.score = sd(explanation))
# # combine into single data.frame for all study task data
descriptives.study.position <- rbind(desc.all.study.cond, desc.all.study.prince)
# # # Print descriptives
cat('\n');print("STUDY TASK PERFORMANCE BY POSITION")
print(descriptives.study.position)
# # # write descriptives to file
# NOTE(review): filename is missing a space before 'descriptives' — confirm
# whether downstream tooling expects this exact name before changing it.
if(write.output == TRUE){
write.csv(descriptives.study.position, 'E3b study task performance by positiondescriptives.csv', row.names = FALSE)
}
# # Descriptives for Category-building study performance by case
desc.study.case <- ddply(study.cases, .(case, principle), summarize,
score = mean(explanation), sd.score = sd(explanation))
# Sort rows by principle for readability.
desc.study.case <- desc.study.case[order(desc.study.case[,2]),]
# # # Print descriptives
cat('\n');print("CATEGORY-BUILDING STUDY PERFORMANCE BY CASE")
print(desc.study.case)
# # # write descriptives to file
if(write.output == TRUE){
write.csv(desc.study.case, 'E3b category-building study performance by case descriptives.csv', row.names = FALSE)
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Inferential Statistics
# # save a copy of stats output as text file (split = TRUE keeps console echo)
if(write.output == TRUE){
sink('Experiment 3b Analysis Output.txt', split = TRUE)
}
# # Spontaneous reminding - target cues by condition
# NOTE(review): "SPONTNAEOUS" is a typo in the printed labels below; left
# unchanged here since these strings appear in the archived output file.
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION RESULTS")
# # # Is there a higher rate of spontaneous remindings in Category-building than comparison conditions?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod1CB <- glmer(reminding ~ condition + (1|pnum),
subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod1CB))
# # Calculate Odds Ratios as effect size for primary analysis
CB.hits <- mean(subset(data.reminding, principle != 'Distractor' & condition == "Category-building")$reminding)
SU.hits <- mean(subset(data.reminding, principle != 'Distractor' & condition == "Summarize")$reminding)
# # # Calculate Odds Ratio
CB.SU.OR <- (CB.hits / (1- CB.hits)) / (SU.hits/ (1-SU.hits))
# # # Print results
cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION EFFECTS SIZES")
print(paste('Category-building vs. Summarization Odds Ratio:', CB.SU.OR))
cat('\n')
# # Spontaneous reminding - target cues by condition and principle
# Each model is refit with a different reference level so every simple
# effect gets its own test in the summary output.
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION AND PRINCIPLE RESULTS")
# # # Is there a higher rate of spontaneous remindings in Category-building than Summarize?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Problem-as-a-solution')
mod2CBa <- glmer(reminding ~ condition*principle + (1|pnum),
subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2CBa))
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Convergence')
mod2CBb <- glmer(reminding ~ condition*principle + (1|pnum),
subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2CBb))
# # # Do principles differ for Summarize condition?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Summarize")
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Problem-as-a-solution')
mod2SSa <- glmer(reminding ~ condition*principle + (1|pnum),
subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2SSa))
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Convergence')
mod2SSb <- glmer(reminding ~ condition*principle + (1|pnum),
subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2SSb))
# # Spontaneous reminding - number of cases listed for target cues
cat('\n');cat('\n'); print("NUMBER OF RESPONSES PROVIDED TO TARGET CUES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of responses provided for each reminding assessment?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod8CB <- lmer(mem.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor"))
print(summary(mod8CB))
# # Spontaneous reminding - number of case-based reminding successes for target cues by condition
cat('\n');cat('\n'); print("NUMBER OF CASE-BASED REMINDING SUCCESSES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of case-based reminding successes?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod9CB <- glmer(cases.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor" & reminding == 1), family = 'binomial')
print(summary(mod9CB))
# # Spontaneous reminding - number of principle-based reminding successes for target cues by condition
cat('\n');cat('\n'); print("NUMBER OF PRINCIPLE-BASED REMINDING SUCCESSES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of principle-based reminding successes?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod10CB <- glmer(set.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor" & reminding == 1), family = 'binomial')
print(summary(mod10CB))
# # Spontaneous reminding - distractor cues by condition (overgeneralization)
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING DISTRACTOR CUES BY CONDITION RESULTS")
# # # Is there a higher rate of overgeneralization in Category-building than comparison conditions?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod3CB <- glmer(reminding ~ condition + (1|pnum),
subset(data.reminding, principle == "Distractor"), family = 'binomial')
print(summary(mod3CB))
# # Spontaneous reminding - distractor cues by cue
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING DISTRACTOR CUES BY CUE RESULTS")
# # # Does overgeneralization differ across the individual distractor cues?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod4CA <- glmer(reminding ~ case + (1|pnum),
subset(data.reminding, principle == "Distractor"), family = 'binomial')
print(summary(mod4CA))
# # Final study task - performance by condition
cat('\n');cat('\n'); print("FINAL STUDY TASK PERFORMANCE BY CONDITION RESULTS")
# # # Does Category-building differ in the ability to articulate the target principle by the end of training from Double- and Single-comparison?
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Category-building')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Problem-as-a-solution')
mod6CBa <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6CBa))
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Category-building')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Convergence')
mod6CBb <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6CBb))
# # # Does Single-comparison for effect of target principle?
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Summarize')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Problem-as-a-solution')
mod6SU <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6SU))
# # All study task - performance by condition
# # # (mod7 series: same design as mod6, but the outcome is performance
# # # averaged over all study cases rather than the final case only)
cat('\n');cat('\n'); print("ALL STUDY TASK PERFORMANCE BY CONDITION RESULTS")
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Category-building')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Problem-as-a-solution')
mod7CBa <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7CBa))
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Category-building')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Convergence')
mod7CBb <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7CBb))
# # # Effect of target principle within the Summarize condition
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Summarize')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Problem-as-a-solution')
mod7SU <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7SU))
# # Study task - performance across task within each condition. Included in coverletter, but not manuscript
# study.cases$condition <- relevel(study.cases$condition, ref = 'Category-building')
# mod11CB <- lmer(explanation ~ position*condition + (1|id), data = study.cases)
# print(summary(mod11CB))
# study.cases$condition <- relevel(study.cases$condition, ref = 'Summarize')
# mod11SU <- lmer(explanation ~ position*condition + (1|id), data = study.cases)
# print(summary(mod11SU))
# # study task - performance by case aggregating over condition
# # # The same mixed model is refit with each case as the reference level so
# # # every pairwise case contrast is reported.
# # # FIX: the summary() calls were previously bare; bare top-level summary()
# # # results are not echoed when this script is source()d, and would therefore
# # # be missing from the sink()ed output file. Wrapping them in print() matches
# # # the print(summary(...)) pattern used for every other model in the script.
study.cases$case <- relevel(study.cases$case, ref = 'avalanche')
mod12a <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution'& case != "final.paas"))
print(summary(mod12a))
study.cases$case <- relevel(study.cases$case, ref = 'earthquake')
mod12b <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution' & case != "final.paas"))
print(summary(mod12b))
study.cases$case <- relevel(study.cases$case, ref = 'solar')
mod12c <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution' & case != "final.paas"))
print(summary(mod12c))
study.cases$case <- relevel(study.cases$case, ref = 'tank')
mod12d <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
print(summary(mod12d))
study.cases$case <- relevel(study.cases$case, ref = 'wood')
mod12e <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
print(summary(mod12e))
study.cases$case <- relevel(study.cases$case, ref = 'army')
mod12f <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
print(summary(mod12f))
# close the sink opened at the top of the inferential section so the
# captured analysis output is flushed to the text file before plotting
if(write.output == TRUE){
  sink()
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Visualizations
# # Spontaneous remindings to target cues by condition and principle
# # # Get fitted values from the regression models fit above (mod1CB, mod2CBa)
plot1df <- subset(data.reminding, principle != 'Distractor')
# drop unused factor levels left over from the full data set
plot1df$condition <- as.factor(as.character(plot1df$condition))
plot1df$fitted.cond <- predict(newdata = plot1df, mod1CB, type = 'response')
plot1df$fitted.princ <- predict(newdata = plot1df, mod2CBa, type = 'response')
# # # Add descriptives for fitted values to existing descriptives
# (the two ddply blocks must stay in this order so their rows line up with the
# row order of descriptives.targets: condition-only rows, then condition x principle)
plot1df.summary <- subset(descriptives.targets, condition != 'Combined')
plot1df.summary$fitted <- c(
  ddply(plot1df, .(condition), summarize, fitted.reminding = mean(fitted.cond))$fitted.reminding,
  ddply(plot1df, .(condition, principle), summarize, fitted.reminding = mean(fitted.princ))$fitted.reminding
)
# # # rename vars for plotting
# NOTE(review): levels()<- renames by the factor's current level ORDER, not by
# value; this assumes the condition/principle levels sort exactly as listed
# below -- verify the resulting facet/axis labels against the data.
plot1df.summary[,1]<-as.factor(plot1df.summary[,1])
levels(plot1df.summary[,1]) <- c("Category\nBuilding","Summarization","Total")
plot1df.summary[,2]<-as.factor(plot1df.summary[,2])
levels(plot1df.summary[,2]) <- c("Overall","Convergence","Distractor","Problem-as-a-Solution")
# # # create plot (bars = observed proportions, error bars = 95% CIs,
# # # black diamonds = model fitted values) and write to disk
remind.plot <- ggplot(plot1df.summary,aes(x=condition, fill = condition, y = spont.reminding)) +
  geom_bar(stat = 'identity', col = 'black',
           fill = rep(c('grey85','grey25'),3))+
  facet_grid(~principle) +
  geom_errorbar(aes(ymin= lower95ci, ymax= upper95ci),width=.2) +
  geom_point(data = plot1df.summary, aes(x=condition, color = condition, y = fitted), pch = 18,
             size = 5, color = 'black') +
  labs(y = "Proportion Structure-Based Reminding \n", x = "\n Study Task") +
  coord_cartesian(ylim=c(0, 1)) +
  scale_y_continuous(breaks=seq(0, 1, 0.1),expand = c(0,0)) +
  theme_bw() +
  theme(axis.text.x = element_text(face = "plain", color = "black", size = 12),
        axis.title.x = element_text(face="bold", size=14),
        axis.text.y = element_text(face = "plain", color = "black", size = 12),
        axis.title.y = element_text(face="bold", size=16),
        strip.text = element_text(face = "bold",size=14),
        panel.grid.major.x = element_blank() ,
        panel.grid.major.y = element_line(color = "grey"),
        panel.grid.minor.y = element_line(color = "grey"),
        axis.line = element_line(colour = "black"),
        legend.position = "none")
ggsave('E3b remindings to target cues.png',remind.plot, width = 10.8, height = 6, units = 'in')
# # # Overgeneralization to distractors by condition and cue. Not included in manuscript.
# # # # Get fitted values from regression models
# plot2df <- subset(data.reminding, principle == 'Distractor')
# plot2df$condition <- as.factor(as.character(plot2df$condition))
# plot2df$fitted.cond <- predict(newdata = plot2df, mod4CB, type = 'response')
# plot2df$fitted.case <- predict(newdata = plot2df, mod5CB, type = 'response')
# # # # Add descriptives for fitted values to existing descriptives
# plot2df.summary <- subset(descriptives.distractors, condition != 'Combined')
# plot2df.summary$fitted <- c(
# ddply(plot2df, .(condition), summarize, fitted.reminding = mean(fitted.cond))$fitted.reminding,
# ddply(plot2df, .(condition, case), summarize, fitted.reminding = mean(fitted.case))$fitted.reminding
# )
# # # # rename vars for plotting
# plot2df.summary[,1]<-as.factor(plot2df.summary[,1])
# levels(plot2df.summary[,1]) <- c("Category\nBuilding","Double\nComparison","Single\nComparison","Total")
# plot2df.summary[,2]<-as.factor(plot2df.summary[,2])
# levels(plot2df.summary[,2]) <- c("Overall","aquarium","blackmarket","internetsec","tumor","The Birthday Party","The Wine Merchant")
# distract.plot <- ggplot(plot2df.summary,aes(x=condition, fill = case, y = spont.reminding)) +
# geom_bar(stat = 'identity', col = 'black',
# position = position_dodge())+
# geom_errorbar(aes(ymin= lower95ci, ymax= upper95ci),width=.2, position = position_dodge(.9)) +
# geom_point(data = plot2df.summary, aes(x=condition, fill = case, y = fitted), pch = 18,
# size = 5, color = 'black', position = position_dodge(.9)) +
# labs(y = "Proportion Overgeneralized \n", x = "\n Study Task", fill="Distractor") +
# coord_cartesian(ylim=c(0, 1)) +
# scale_y_continuous(breaks=seq(0, 1, 0.1),expand = c(0,0)) +
# scale_fill_manual(values = c('grey85','grey50', 'grey25')) +
# theme_bw() +
# theme(axis.text.x = element_text(face = "plain", color = "black", size = 12),
# axis.title.x = element_text(face="bold", size=14),
# axis.text.y = element_text(face = "plain", color = "black", size = 12),
# axis.title.y = element_text(face="bold", size=16),
# legend.text = element_text(size = 12),
# legend.title = element_text(size = 14),
# panel.grid.major.x = element_blank() ,
# panel.grid.major.y = element_line(color = "grey"),
# panel.grid.minor.y = element_line(color = "grey"),
# axis.line = element_line(colour = "black"),
# legend.position="bottom")
# ggsave('E3b remindings to distractor cues.png',distract.plot, width = 7, height = 7, units = 'in')
# ---- /cat_building/exp3b_analysis.r -- Experiment 3b analysis script begins below ----
# write output of analysis to text file?
write.output <- TRUE
# load required packages
libs <- c("plyr", "binom", "lme4", "lmerTest", "tidyr","ggplot2")
lapply(libs, library, character.only = TRUE)
# Standard error of the mean, ignoring missing values.
# FIX: the numerator previously used sd(var) without na.rm, which returns NA
# whenever var contains any NA, even though the denominator already counts
# only non-missing observations. Numerator and denominator now agree: both
# are computed over the non-missing values.
sem <- function(var){
  return(sd(var, na.rm = TRUE) / sqrt(length(var[!is.na(var)])))
}
# turn off scientific notation in printed model output and descriptives
options(scipen = 999)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Read in data
# read in data from spontaneous reminding assessment
data.reminding <- read.csv('Experiment 3b reminding data.csv')
# read in data from study tasks
data.study <- read.csv('Experiment 3b study task data.csv')
# normalize the first column name (presumably it imports mangled, e.g. a BOM
# prefix -- TODO confirm against the csv)
names(data.study)[1] <- 'pnum'
# # subset only data necessary for present analyses and descriptives
# # # create long form data for encoding analyses + descriptives
# NOTE(review): columns are selected by position (27 and 52 appear to be the
# final-task scores for the two principles) -- verify if the csv layout changes
data.study.lf <- gather(data.study[,c(1:3,27,52)], key = 'stim', value = 'final.task.score',-c('pnum','condition','encoding.count.bal'))
data.study.lf$principle <- ifelse(data.study.lf$stim == "paas.description.total", "Problem-as-a-solution","Convergence")
data.study.lf$principle <- as.factor(data.study.lf$principle)
# average-performance frames, one per principle, stacked into long form below
data.study.pass <- data.study[,c(1,2,3,54)]
names(data.study.pass)[4] <- 'avgPerf'
data.study.pass$principle <- 'Problem-as-a-solution'
data.study.conv <- data.study[,c(1,2,3,55)]
names(data.study.conv)[4] <- 'avgPerf'
data.study.conv$principle <- 'Convergence'
data.study.lf2 <- rbind(data.study.pass,data.study.conv)
data.study.lf2$principle <- as.factor(data.study.lf2$principle)
# merge reminding data and encoding data (keep all reminding rows)
data.combined <- merge(x = data.reminding, y= data.study.lf[,-c(2:4)],
                       by= c('pnum', 'principle'), all.x = TRUE, all.y = FALSE)
# build a case-level long data frame: one row per participant x study case,
# with the explanation-quality score for that case.
# NOTE(review): explanation columns are pulled by position (7,12,17,22 for the
# four paas cases; 32,37,42,47 for the four convergence cases, in the same
# order as the case labels below) -- confirm against the csv layout.
study.cases <- data.frame(id = rep(data.study[,1],8),
                          condition = rep(data.study[,2], 8),
                          count.bal = rep(data.study[,3], 8),
                          principle = c(rep('Problem-as-a-solution', nrow(data.study)*4),rep('Convergence', nrow(data.study)*4)),
                          case = c(rep("avalanche", nrow(data.study)),rep("wildfire", nrow(data.study)),rep("earthquake", nrow(data.study)),rep("solar", nrow(data.study)),
                                   rep("army", nrow(data.study)),rep("tank", nrow(data.study)),rep("wood", nrow(data.study)),rep("oil", nrow(data.study))),
                          explanation = c(data.study[,7], data.study[,12], data.study[,17], data.study[,22], data.study[,32], data.study[,37], data.study[,42], data.study[,47]))
# Code the serial position (1-4) of each study case within its
# counterbalancing order. Cases come in yoked pairs (one Problem-as-a-solution
# case and one Convergence case studied at the same position):
#   A = avalanche/army, W = wildfire/tank, E = earthquake/oil, S = solar/wood.
# Each counterbalancing letter fixes the order of the four pairs. This lookup
# replaces a 36-branch nested ifelse() with identical results: an unknown
# counterbalance letter or case yields the same 999 sentinel as before.
# (Assumes count.bal is never NA, as in the original data -- TODO confirm.)
case.pair <- c(avalanche = "A", army = "A",
               wildfire  = "W", tank = "W",
               earthquake = "E", oil = "E",
               solar = "S", wood = "S")
pair.order <- list(a = c("W", "A", "E", "S"),
                   b = c("A", "E", "S", "W"),
                   c = c("E", "S", "W", "A"),
                   d = c("S", "W", "A", "E"),
                   e = c("W", "E", "S", "A"),
                   f = c("S", "W", "A", "E"),
                   g = c("A", "E", "W", "S"),
                   i = c("W", "E", "S", "A"),
                   k = c("A", "S", "E", "W"))
study.cases$position <- mapply(function(cb, pr) {
  ord <- if (is.na(cb)) NULL else pair.order[[cb]]
  pos <- if (is.null(ord)) NA_integer_ else match(pr, ord)
  if (is.na(pos)) 999 else as.numeric(pos)
}, as.character(study.cases$count.bal),
   case.pair[as.character(study.cases$case)],
   USE.NAMES = FALSE)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Descriptive Statistics
# # Descriptives for spontaneous remindings to target cues by condition
desc.target.condition <- ddply(subset(data.reminding, principle != "Distractor"),
                               .(condition), summarize, spont.reminding = mean(reminding),
                               frequency = sum(reminding),
                               n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
# (binom.logit columns 5 and 6 are the lower/upper CI bounds)
desc.target.condition <- cbind(desc.target.condition,
                               binom.logit(desc.target.condition[,3], desc.target.condition[,4],
                                           conf.level = 0.95)[,c(5,6)])
# tag these rows as collapsed ("Combined") over principle
desc.target.condition <- cbind(desc.target.condition[,1], rep("Combined",2),desc.target.condition[,-1])
names(desc.target.condition)[c(1,2,6,7)] <- c("condition","principle","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to target cues by principle
desc.target.principle <- ddply(subset(data.reminding, principle != "Distractor"),
                               .(principle), summarize, spont.reminding = mean(reminding),
                               frequency = sum(reminding),
                               n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.target.principle <- cbind(desc.target.principle,
                               binom.logit(desc.target.principle[,3], desc.target.principle[,4],
                                           conf.level = 0.95)[,c(5,6)])
desc.target.principle <- cbind(rep("Combined",2),desc.target.principle)
names(desc.target.principle)[c(1,6,7)] <- c("condition","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to target cues by condition and principle
desc.target.cond.princ <- ddply(subset(data.reminding, principle != "Distractor"),
                                .(condition, principle), summarize, spont.reminding = mean(reminding),
                                frequency = sum(reminding),
                                n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.target.cond.princ <- cbind(desc.target.cond.princ,
                                binom.logit(desc.target.cond.princ[,4], desc.target.cond.princ[,5],
                                            conf.level = 0.95)[,c(5,6)])
names(desc.target.cond.princ)[6:7] <- c("lower95ci","upper95ci")
# # combine into single data.frame for all target cues
descriptives.targets <- rbind(desc.target.condition,
                              desc.target.principle,desc.target.cond.princ)
# # # Print descriptives
cat('\n');print("REMINDINGS TO TARGETS BY CONDITION AND PRINCIPLE DESCRIPTIVES")
print(descriptives.targets)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(descriptives.targets, 'E3b reminding to targets by condition and principle descriptives.csv', row.names = FALSE)
}
# # Descriptives for overgeneralized remindings to distractor cues by condition
desc.distract.condition <- ddply(subset(data.reminding, principle == "Distractor"),
                                 .(condition), summarize, spont.reminding = mean(reminding),
                                 frequency = sum(reminding),
                                 n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.condition <- cbind(desc.distract.condition,
                                 binom.logit(desc.distract.condition[,3], desc.distract.condition[,4],
                                             conf.level = 0.95)[,c(5,6)])
# tag these rows as collapsed ("Combined") over case
desc.distract.condition <- cbind(desc.distract.condition[1],rep("Combined",2),desc.distract.condition[,-1])
names(desc.distract.condition)[c(2,6,7)] <- c("case","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to distractors by case
desc.distract.case <- ddply(subset(data.reminding, principle == "Distractor"),
                            .(case), summarize, spont.reminding = mean(reminding),
                            frequency = sum(reminding),
                            n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.case <- cbind(desc.distract.case,
                            binom.logit(desc.distract.case[,3], desc.distract.case[,4],
                                        conf.level = 0.95)[,c(5,6)])
desc.distract.case <- cbind(rep('Combined',2), desc.distract.case)
names(desc.distract.case)[c(1,6,7)] <- c("condition","lower95ci","upper95ci")
# # Descriptives for spontaneous remindings to distractors by condition and case
desc.distract.condcase <- ddply(subset(data.reminding, principle == "Distractor"),
                                .(condition, case), summarize, spont.reminding = mean(reminding),
                                frequency = sum(reminding),
                                n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.condcase <- cbind(desc.distract.condcase,
                                binom.logit(desc.distract.condcase[,4], desc.distract.condcase[,5],
                                            conf.level = 0.95)[,c(5,6)])
names(desc.distract.condcase)[6:7] <- c("lower95ci","upper95ci")
# combine the three summaries into one table
descriptives.distractors <- rbind(desc.distract.condition,
                                  desc.distract.case,desc.distract.condcase)
# # # Print descriptives
cat('\n');print("OVERGENERALIZATION TO DISTRACTORS BY CASE AND DESCRIPTIVES")
print(descriptives.distractors)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(descriptives.distractors, 'E3b reminding to distractors by case descriptives.csv', row.names = FALSE)
}
# # Descriptives for spontaneous remindings to distractors by distractor position
desc.distract.pos <- ddply(subset(data.reminding, principle == "Distractor"),
                           .(position), summarize, spont.reminding = mean(reminding),
                           frequency = sum(reminding),
                           n = length(reminding))
# # # Generate 95% CIs for transfer rate and add to descriptives
desc.distract.pos <- cbind(desc.distract.pos,
                           binom.logit(desc.distract.pos[,3], desc.distract.pos[,4],
                                       conf.level = 0.95)[,c(5,6)])
names(desc.distract.pos)[5:6] <- c("lower95ci","upper95ci")
# # # Print descriptives
cat('\n');print("OVERGENERALIZATION TO DISTRACTORS BY POSITION IN ASSESSMENT")
print(desc.distract.pos)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(desc.distract.pos, 'E3b reminding to distractors by position descriptives.csv', row.names = FALSE)
}
# # Descriptives for number of responses listed for target cues by condition
desc.num.cases.cond <- ddply(subset(data.reminding, principle != "Distractor"),
                             .(condition), summarize, avg.cases = mean(mem.listed), sd.cases = sd(mem.listed),
                             min.cases = min(mem.listed), max.cases = max(mem.listed), n.obs = length(mem.listed))
# # # Print descriptives
cat('\n');print("NUMBER OF RESPONSES PROVIDED TO TARGET CUES BY CONDITION")
print(desc.num.cases.cond)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(desc.num.cases.cond, 'E3b number of responses to target cues by condition and reminding success descriptives.csv', row.names = FALSE)
}
# # Descriptives for type of successful remindings to target cues by condition
# (successful remindings only; proportions of case-based vs principle-based
# responses, each with 95% logit CIs)
desc.remind.type <- ddply(subset(data.reminding, principle != "Distractor" & reminding == 1),
                          .(condition), summarize, prop.case = mean(cases.listed), n.cases = sum(cases.listed),
                          prop.principle = mean(set.listed), n.principle = sum(set.listed), n.obs = length(cases.listed))
desc.remind.type <- cbind(desc.remind.type,
                          binom.logit(desc.remind.type[,3], desc.remind.type[,6],
                                      conf.level = 0.95)[,c(5,6)],
                          binom.logit(desc.remind.type[,5], desc.remind.type[,6],
                                      conf.level = 0.95)[,c(5,6)])
names(desc.remind.type)[7:10] <- c("lower95ci.case","upper95ci.case", "lower95ci.principle","upper95ci.principle")
# # # Print descriptives
cat('\n');print("TYPE OF RESPONSES FOR SUCCESSFUL REMINDINGS TO TARGET CUES BY CONDITION")
print(desc.remind.type)
# # # write descriptives to file
# FIX: the output file was named 'E2 ...' (copy-paste from the Experiment 2
# script); every other output of this script uses the 'E3b' prefix.
if(write.output == TRUE){
  write.csv(desc.remind.type, 'E3b type of responses for successful remindings descriptives.csv', row.names = FALSE)
}
# # Descriptives for final study task performance by condition
desc.final.study.cond <- ddply(data.study.lf, .(condition), summarize,
                               mean.score = mean(final.task.score), sem.score = sem(final.task.score),
                               sd.score = sd(final.task.score))
# tag these rows as collapsed ("Combined") over principle
desc.final.study.cond <- cbind(desc.final.study.cond[,1], rep("Combined",2),desc.final.study.cond[,-1])
names(desc.final.study.cond)[1:2] <- c('condition','principle')
# # Descriptives for final study task performance by principle
desc.final.study.princ <- ddply(data.study.lf, .(principle), summarize,
                                mean.score = mean(final.task.score), sem.score = sem(final.task.score),
                                sd.score = sd(final.task.score))
desc.final.study.princ <- cbind(rep("Combined",2),desc.final.study.princ)
names(desc.final.study.princ)[1] <- 'condition'
# # Descriptives for final study task performance by condition and principle
desc.final.study.cond.princ <- ddply(data.study.lf, .(condition, principle), summarize,
                                     mean.score = mean(final.task.score), sem.score = sem(final.task.score),
                                     sd.score = sd(final.task.score))
# # combine into single data.frame for all study task data
descriptives.final.study <- rbind(desc.final.study.cond,
                                  desc.final.study.princ,desc.final.study.cond.princ)
# # # Print descriptives
cat('\n');print("FINAL STUDY TASK PERFORMANCE")
print(descriptives.final.study)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(descriptives.final.study, 'E3b final study task performance descriptives.csv', row.names = FALSE)
}
# # Descriptives for study task performance by condition
# (participant-level means first, then aggregated across participants so each
# participant contributes equally)
desc.study.id.cond <- ddply(study.cases, .(id, condition), summarize,
                            avg = mean(explanation), sem = sem(explanation),
                            sd.score = sd(explanation))
desc.study.cond <- ddply(desc.study.id.cond, .(condition), summarize,
                         mean.score = mean(avg), sem.score = sem(avg),
                         sd.score = sd(avg))
desc.study.cond <- cbind(desc.study.cond[,1], rep("Combined",2),desc.study.cond[,-1])
names(desc.study.cond)[1:2] <- c('condition','principle')
# # Descriptives for study task performance by principle (collapsed over condition)
# FIX: desc.study.princ was referenced in the rbind() below but never defined,
# which stopped the script with "object 'desc.study.princ' not found".
desc.study.id.princ <- ddply(study.cases, .(id, principle), summarize,
                             avg = mean(explanation), sem.score = sem(explanation),
                             sd.score = sd(explanation))
desc.study.princ <- ddply(desc.study.id.princ, .(principle), summarize,
                          mean.score = mean(avg), sem.score = sem(avg),
                          sd.score = sd(avg))
desc.study.princ <- cbind(rep("Combined",2), desc.study.princ)
names(desc.study.princ)[1] <- 'condition'
# # Descriptives for study task performance by condition and principle
desc.study.id.cond.princ <- ddply(study.cases, .(id,condition, principle), summarize,
                                  avg = mean(explanation), sem.score = sem(explanation),
                                  sd.score = sd(explanation))
desc.study.cond.princ <- ddply(desc.study.id.cond.princ, .(condition, principle), summarize,
                               mean.score = mean(avg), sem.score = sem(avg),
                               sd.score = sd(avg))
# # combine into single data.frame for all study task data
descriptives.study <- rbind(desc.study.cond,
                            desc.study.princ,desc.study.cond.princ)
# # # Print descriptives
# (label fixed: this section summarizes performance across ALL study cases;
# the final-study-task section above already printed "FINAL STUDY TASK PERFORMANCE")
cat('\n');print("STUDY TASK PERFORMANCE")
print(descriptives.study)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(descriptives.study, 'E3b study task performance descriptives.csv', row.names = FALSE)
}
# # Descriptives for study task performance across training (by serial position)
desc.all.study.cond <- ddply(study.cases, .(condition, position), summarize,
                             score = mean(explanation), sd.score = sd(explanation))
# tag these rows as collapsed ("Combined") over principle
desc.all.study.cond<- cbind(desc.all.study.cond [,1], "Combined",desc.all.study.cond[,-1])
names(desc.all.study.cond)[1:2] <- c('condition','principle')
# # # by condition, principle, and position
desc.all.study.prince <- ddply(study.cases, .(condition, principle, position), summarize,
                               score = mean(explanation), sd.score = sd(explanation))
# # combine into single data.frame for all study task data
descriptives.study.position <- rbind(desc.all.study.cond, desc.all.study.prince)
# # # Print descriptives
cat('\n');print("STUDY TASK PERFORMANCE BY POSITION")
print(descriptives.study.position)
# # # write descriptives to file
# FIX: the output filename was missing the space before "descriptives"
# ('...by positiondescriptives.csv')
if(write.output == TRUE){
  write.csv(descriptives.study.position, 'E3b study task performance by position descriptives.csv', row.names = FALSE)
}
# # Descriptives for study performance by case and principle
# NOTE(review): the header and output filename say "Category-building", but no
# condition subset is applied here, so all conditions are aggregated --
# confirm whether that is intended.
desc.study.case <- ddply(study.cases, .(case, principle), summarize,
                         score = mean(explanation), sd.score = sd(explanation))
# order rows by principle so the two principles' cases are grouped together
desc.study.case <- desc.study.case[order(desc.study.case[,2]),]
# # # Print descriptives
cat('\n');print("CATEGORY-BUILDING STUDY PERFORMANCE BY CASE")
print(desc.study.case)
# # # write descriptives to file
if(write.output == TRUE){
  write.csv(desc.study.case, 'E3b category-building study performance by case descriptives.csv', row.names = FALSE)
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Inferential Statistics
# # save a copy of stats output as text file (split = TRUE also echoes to
# # console; the sink is closed after the final model below)
if(write.output == TRUE){
  sink('Experiment 3b Analysis Output.txt', split = TRUE)
}
# # Sponteous reminding - target cues by condition
# (typos such as "SPONTNAEOUS" in the printed labels are left unchanged:
# they are part of the program's output and may be matched downstream)
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION RESULTS")
# # # Is there a higher rate of spontaneous remindings in Category-building than comparison conditions?
# logistic mixed model with a random intercept per participant
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod1CB <- glmer(reminding ~ condition + (1|pnum),
                subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod1CB))
# # Calculate Odds Ratios as effect size for primary analysis
# (computed from raw condition-level reminding proportions, not model estimates)
CB.hits <- mean(subset(data.reminding, principle != 'Distractor' & condition == "Category-building")$reminding)
SU.hits <- mean(subset(data.reminding, principle != 'Distractor' & condition == "Summarize")$reminding)
# # # Calculate Odds Ratio
CB.SU.OR <- (CB.hits / (1- CB.hits)) / (SU.hits/ (1-SU.hits))
# # # Print results
cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION EFFECTS SIZES")
print(paste('Category-building vs. Summarization Odds Ratio:', CB.SU.OR))
cat('\n')
# # Sponteous reminding - target cues by condition and principle
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING TARGET CUES BY CONDITION AND PRINCIPLE RESULTS")
# # # Is there a higher rate of spontaneous remindings in Category-building than Summarize?
# the same interaction model is refit after releveling so each summary reports
# simple effects relative to the reference condition/principle in the model id
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Problem-as-a-solution')
mod2CBa <- glmer(reminding ~ condition*principle + (1|pnum),
                 subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2CBa))
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Convergence')
mod2CBb <- glmer(reminding ~ condition*principle + (1|pnum),
                 subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2CBb))
# # # Do principles differ within the Summarize condition?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Summarize")
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Problem-as-a-solution')
mod2SSa <- glmer(reminding ~ condition*principle + (1|pnum),
                 subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2SSa))
data.reminding$principle <- relevel(data.reminding$principle, ref = 'Convergence')
mod2SSb <- glmer(reminding ~ condition*principle + (1|pnum),
                 subset(data.reminding, principle != "Distractor"), family = 'binomial')
print(summary(mod2SSb))
# # Spontaneous reminding - number of cases listed for target cues
cat('\n');cat('\n'); print("NUMBER OF RESPONSES PROVIDED TO TARGET CUES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of responses provided for each reminding assessment?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod8CB <- lmer(mem.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor"))
print(summary(mod8CB))
# # Spontaneous reminding - number of case-based reminding successes for target cues by condition
cat('\n');cat('\n'); print("NUMBER OF CASE-BASED REMINDING SUCCESSES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of case-based reminding successes?
# (analyzed within successful remindings only, hence the reminding == 1 subset)
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod9CB <- glmer(cases.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor" & reminding == 1), family = 'binomial')
print(summary(mod9CB))
# # Spontaneous reminding - number of principle-based reminding successes for target cues by condition
cat('\n');cat('\n'); print("NUMBER OF PRINCIPLE-BASED REMINDING SUCCESSES BY CONDITION")
# # # Does Category-building differ from comparison conditions in the number of principle-based reminding successes?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod10CB <- glmer(set.listed ~ condition + (1|pnum), data = subset(data.reminding, principle != "Distractor" & reminding == 1), family = 'binomial')
print(summary(mod10CB))
# # Sponteous reminding - distractor cues by condition
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING DISTRACTOR CUES BY CONDITION RESULTS")
# # # Is there a higher rate of overgeneralization in Category-building than comparison conditions?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod3CB <- glmer(reminding ~ condition + (1|pnum),
                subset(data.reminding, principle == "Distractor"), family = 'binomial')
print(summary(mod3CB))
# # Sponteous reminding - distractor cues by condition and cue
cat('\n');cat('\n'); print("SPONTNAEOUS REMINDING DISTRACTOR CUES BY CUE RESULTS")
# # # Does overgeneralization differ across the individual distractor cues?
data.reminding$condition <- relevel(data.reminding$condition, ref = "Category-building")
mod4CA <- glmer(reminding ~ case + (1|pnum),
                subset(data.reminding, principle == "Distractor"), family = 'binomial')
print(summary(mod4CA))
# # Final study task - performance by condition
cat('\n');cat('\n'); print("FINAL STUDY TASK PERFORMANCE BY CONDITION RESULTS")
# # # Does Category-building differ from Summarize in the ability to articulate
# # # the target principle by the end of training? Factors are releveled before
# # # each fit so the reported contrasts use the reference levels named in the
# # # model id (order of relevel calls matters; do not reorder).
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Category-building')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Problem-as-a-solution')
mod6CBa <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6CBa))
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Category-building')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Convergence')
mod6CBb <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6CBb))
# # # Effect of target principle within the Summarize condition
data.study.lf$condition <- relevel(data.study.lf$condition, ref = 'Summarize')
data.study.lf$principle <- relevel(data.study.lf$principle, ref = 'Problem-as-a-solution')
mod6SU <- lm(final.task.score ~ condition*principle, data.study.lf)
print(summary(mod6SU))
# # All study task - performance by condition
cat('\n');cat('\n'); print("ALL STUDY TASK PERFORMANCE BY CONDITION RESULTS")
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Category-building')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Problem-as-a-solution')
mod7CBa <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7CBa))
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Category-building')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Convergence')
mod7CBb <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7CBb))
# # # Does Single-comparison for effect of target principle?
data.study.lf2$condition <- relevel(data.study.lf2$condition, ref = 'Summarize')
data.study.lf2$principle <- relevel(data.study.lf2$principle, ref = 'Problem-as-a-solution')
mod7SU <- lm(avgPerf ~ condition*principle, data.study.lf2)
print(summary(mod7SU))
# # Study task - performance across task within each condition. Included in coverletter, but not manuscript
# study.cases$condition <- relevel(study.cases$condition, ref = 'Category-building')
# mod11CB <- lmer(explanation ~ position*condition + (1|id), data = study.cases)
# print(summary(mod11CB))
# study.cases$condition <- relevel(study.cases$condition, ref = 'Summarize')
# mod11SU <- lmer(explanation ~ position*condition + (1|id), data = study.cases)
# print(summary(mod11SU))
# # study task - performance by case aggregating over condition
study.cases$case <- relevel(study.cases$case, ref = 'avalanche')
mod12a <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution'& case != "final.paas"))
summary(mod12a)
study.cases$case <- relevel(study.cases$case, ref = 'earthquake')
mod12b <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution' & case != "final.paas"))
summary(mod12b)
study.cases$case <- relevel(study.cases$case, ref = 'solar')
mod12c <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Problem-as-a-solution' & case != "final.paas"))
summary(mod12c)
study.cases$case <- relevel(study.cases$case, ref = 'tank')
mod12d <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
summary(mod12d)
study.cases$case <- relevel(study.cases$case, ref = 'wood')
mod12e <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
summary(mod12e)
study.cases$case <- relevel(study.cases$case, ref = 'army')
mod12f <- lmer(explanation ~ case + (1|id), data = subset(study.cases, principle == 'Convergence' & case != "final.conv"))
summary(mod12f)
if(write.output == TRUE){
sink()
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Visualizations
# # Spontaneous remindings to target cues by condition and principle
# # # Get fitted values from regression models
# Keep only target-cue trials; distractor trials are plotted elsewhere.
plot1df <- subset(data.reminding, principle != 'Distractor')
# Round-trip through character to drop factor levels left over from the subset.
plot1df$condition <- as.factor(as.character(plot1df$condition))
# Model-implied reminding probabilities (response scale) from the earlier
# condition-only (mod1CB) and condition-by-principle (mod2CBa) fits.
plot1df$fitted.cond <- predict(newdata = plot1df, mod1CB, type = 'response')
plot1df$fitted.princ <- predict(newdata = plot1df, mod2CBa, type = 'response')
# # # Add descriptives for fitted values to existing descriptives
plot1df.summary <- subset(descriptives.targets, condition != 'Combined')
# Stack the fitted means in the same row order as the descriptives table:
# condition-level rows first, then condition-by-principle rows.
# NOTE(review): this relies on ddply's output ordering matching
# descriptives.targets row order -- confirm if either table changes.
plot1df.summary$fitted <- c(
  ddply(plot1df, .(condition), summarize, fitted.reminding = mean(fitted.cond))$fitted.reminding,
  ddply(plot1df, .(condition, principle), summarize, fitted.reminding = mean(fitted.princ))$fitted.reminding
)
# # # rename vars for plotting
# Column 1 = condition, column 2 = principle; relabel levels for display.
plot1df.summary[,1] <- as.factor(plot1df.summary[,1])
levels(plot1df.summary[,1]) <- c("Category\nBuilding","Summarization","Total")
plot1df.summary[,2] <- as.factor(plot1df.summary[,2])
levels(plot1df.summary[,2]) <- c("Overall","Convergence","Distractor","Problem-as-a-Solution")
# # # create plot and write to disk
# Bars = observed proportions, error bars = 95% CIs, black diamonds (pch 18)
# = model-fitted means. The hard-coded fill vector overrides the aes() fill
# and assumes exactly 3 facets x 2 conditions (alternating light/dark grey).
remind.plot <- ggplot(plot1df.summary, aes(x=condition, fill = condition, y = spont.reminding)) +
  geom_bar(stat = 'identity', col = 'black',
           fill = rep(c('grey85','grey25'),3)) +
  facet_grid(~principle) +
  geom_errorbar(aes(ymin= lower95ci, ymax= upper95ci), width=.2) +
  # The constant color = 'black' overrides the aes() color mapping, so the
  # diamonds are always black regardless of condition.
  geom_point(data = plot1df.summary, aes(x=condition, color = condition, y = fitted), pch = 18,
             size = 5, color = 'black') +
  labs(y = "Proportion Structure-Based Reminding \n", x = "\n Study Task") +
  coord_cartesian(ylim=c(0, 1)) +
  scale_y_continuous(breaks=seq(0, 1, 0.1), expand = c(0,0)) +
  theme_bw() +
  theme(axis.text.x = element_text(face = "plain", color = "black", size = 12),
        axis.title.x = element_text(face="bold", size=14),
        axis.text.y = element_text(face = "plain", color = "black", size = 12),
        axis.title.y = element_text(face="bold", size=16),
        strip.text = element_text(face = "bold", size=14),
        panel.grid.major.x = element_blank() ,
        panel.grid.major.y = element_line(color = "grey"),
        panel.grid.minor.y = element_line(color = "grey"),
        axis.line = element_line(colour = "black"),
        legend.position = "none")
# Write to the current working directory as a 10.8 x 6 inch PNG.
ggsave('E3b remindings to target cues.png', remind.plot, width = 10.8, height = 6, units = 'in')
# # # Overgeneralization to distractors by condition and cue. Not included in manuscript.
# # # # Get fitted values from regression models
# plot2df <- subset(data.reminding, principle == 'Distractor')
# plot2df$condition <- as.factor(as.character(plot2df$condition))
# plot2df$fitted.cond <- predict(newdata = plot2df, mod4CB, type = 'response')
# plot2df$fitted.case <- predict(newdata = plot2df, mod5CB, type = 'response')
# # # # Add descriptives for fitted values to existing descriptives
# plot2df.summary <- subset(descriptives.distractors, condition != 'Combined')
# plot2df.summary$fitted <- c(
# ddply(plot2df, .(condition), summarize, fitted.reminding = mean(fitted.cond))$fitted.reminding,
# ddply(plot2df, .(condition, case), summarize, fitted.reminding = mean(fitted.case))$fitted.reminding
# )
# # # # rename vars for plotting
# plot2df.summary[,1]<-as.factor(plot2df.summary[,1])
# levels(plot2df.summary[,1]) <- c("Category\nBuilding","Double\nComparison","Single\nComparison","Total")
# plot2df.summary[,2]<-as.factor(plot2df.summary[,2])
# levels(plot2df.summary[,2]) <- c("Overall","aquarium","blackmarket","internetsec","tumor","The Birthday Party","The Wine Merchant")
# distract.plot <- ggplot(plot2df.summary,aes(x=condition, fill = case, y = spont.reminding)) +
# geom_bar(stat = 'identity', col = 'black',
# position = position_dodge())+
# geom_errorbar(aes(ymin= lower95ci, ymax= upper95ci),width=.2, position = position_dodge(.9)) +
# geom_point(data = plot2df.summary, aes(x=condition, fill = case, y = fitted), pch = 18,
# size = 5, color = 'black', position = position_dodge(.9)) +
# labs(y = "Proportion Overgeneralized \n", x = "\n Study Task", fill="Distractor") +
# coord_cartesian(ylim=c(0, 1)) +
# scale_y_continuous(breaks=seq(0, 1, 0.1),expand = c(0,0)) +
# scale_fill_manual(values = c('grey85','grey50', 'grey25')) +
# theme_bw() +
# theme(axis.text.x = element_text(face = "plain", color = "black", size = 12),
# axis.title.x = element_text(face="bold", size=14),
# axis.text.y = element_text(face = "plain", color = "black", size = 12),
# axis.title.y = element_text(face="bold", size=16),
# legend.text = element_text(size = 12),
# legend.title = element_text(size = 14),
# panel.grid.major.x = element_blank() ,
# panel.grid.major.y = element_line(color = "grey"),
# panel.grid.minor.y = element_line(color = "grey"),
# axis.line = element_line(colour = "black"),
# legend.position="bottom")
# ggsave('E3b remindings to distractor cues.png',distract.plot, width = 7, height = 7, units = 'in')
|
context("ExpressionSet to data.frame coercion")
# NOTE(review): `eset` is assumed to be a 3-sample x 3-feature ExpressionSet
# fixture created by the test setup (not visible in this file) -- the metadata
# vectors below are length 3; confirm against the helper/setup file.
Biobase::pData(eset)$pcol <- letters[1:3]  # phenotype (per-sample) metadata column
Biobase::fData(eset)$fcol <- letters[4:6]  # feature (per-gene) metadata column
# to_dataframe() should encode samples/features as factors whose levels match
# the ExpressionSet's own ordering, so plots/joins keep the original order.
test_that("maintains sample/feature order", {
  out <- to_dataframe(eset)
  expect_is(out$sample, "factor")
  expect_equal(levels(out$sample), Biobase::sampleNames(eset))
  expect_is(out$feature, "factor")
  expect_equal(levels(out$feature), Biobase::featureNames(eset))
})
# pData/fData columns are attached by default and can be switched off
# independently via add.pvars / add.fvars.
test_that("includes appropriate metadata", {
  out <- to_dataframe(eset)
  expect_equal(colnames(out), c("feature", "sample", "value", "pcol", "fcol"))
  out <- to_dataframe(eset, add.pvars = FALSE)
  expect_equal(colnames(out), c("feature", "sample", "value", "fcol"))
  out <- to_dataframe(eset, add.fvars = FALSE)
  expect_equal(colnames(out), c("feature", "sample", "value", "pcol"))
  out <- to_dataframe(eset, add.pvars = FALSE, add.fvars = FALSE)
  expect_equal(colnames(out), c("feature", "sample", "value"))
})
# After the long-format expansion each metadata value must stay aligned with
# its sample/feature: splitting by the id column should recover each value
# repeated once per row of the other margin (here, 3 times).
test_that("maintains metadata integrity", {
  out <- to_dataframe(eset)
  expect_equal(
    unlist(split(out$pcol, out$sample), use.names = FALSE),
    rep(letters[1:3], each = 3)
  )
  expect_equal(
    unlist(split(out$fcol, out$feature), use.names = FALSE),
    rep(letters[4:6], each = 3)
  )
})
context("ExpressionSet from data.frame coercion")
# Round-trip setup: add a second feature column, then flatten to long format.
Biobase::fData(eset)$fcol2 <- letters[7:9]
df <- to_dataframe(eset)
# from_dataframe() should rebuild an ExpressionSet whose sample/feature order
# follows the factor levels of the id columns.
test_that("maintains sample/feature order", {
  out <- from_dataframe(df, "sample", "feature", "value")
  expect_s4_class(out, "ExpressionSet")
  expect_equal(Biobase::sampleNames(out), levels(df$sample))
  expect_equal(Biobase::featureNames(out), levels(df$feature))
})
# Metadata columns are only carried over when explicitly requested via
# pvars/fvars; by default the rebuilt object has empty pData/fData.
test_that("includes appropriate metadata", {
  out <- from_dataframe(df, "sample", "feature", "value")
  expect_equal(colnames(Biobase::pData(out)), character())
  expect_equal(colnames(Biobase::fData(out)), character())
  out <- from_dataframe(df, "sample", "feature", "value", pvars = "pcol")
  expect_equal(colnames(Biobase::pData(out)), "pcol")
  out <- from_dataframe(df, "sample", "feature", "value",
                        fvars = c("fcol", "fcol2"))
  expect_equal(colnames(Biobase::fData(out)), c("fcol", "fcol2"))
})
# Duplicating a row makes a (sample, feature) pair non-unique, which should
# be rejected rather than silently aggregated.
test_that("stops if sample/feature ids are not unique", {
  df <- rbind(df, df[1, ])
  expect_error(from_dataframe(df, "sample", "feature", "value"))
})
| /tests/testthat/test-coercion.r | no_license | aaronwolen/metafiler | R | false | false | 2,314 | r | context("ExpressionSet to data.frame coercion")
Biobase::pData(eset)$pcol <- letters[1:3]
Biobase::fData(eset)$fcol <- letters[4:6]
test_that("maintains sample/feature order", {
out <- to_dataframe(eset)
expect_is(out$sample, "factor")
expect_equal(levels(out$sample), Biobase::sampleNames(eset))
expect_is(out$feature, "factor")
expect_equal(levels(out$feature), Biobase::featureNames(eset))
})
test_that("includes appropriate metadata", {
out <- to_dataframe(eset)
expect_equal(colnames(out), c("feature", "sample", "value", "pcol", "fcol"))
out <- to_dataframe(eset, add.pvars = FALSE)
expect_equal(colnames(out), c("feature", "sample", "value", "fcol"))
out <- to_dataframe(eset, add.fvars = FALSE)
expect_equal(colnames(out), c("feature", "sample", "value", "pcol"))
out <- to_dataframe(eset, add.pvars = FALSE, add.fvars = FALSE)
expect_equal(colnames(out), c("feature", "sample", "value"))
})
test_that("maintains metadata integrity", {
out <- to_dataframe(eset)
expect_equal(
unlist(split(out$pcol, out$sample), use.names = FALSE),
rep(letters[1:3], each = 3)
)
expect_equal(
unlist(split(out$fcol, out$feature), use.names = FALSE),
rep(letters[4:6], each = 3)
)
})
context("ExpressionSet from data.frame coercion")
Biobase::fData(eset)$fcol2 <- letters[7:9]
df <- to_dataframe(eset)
test_that("maintains sample/feature order", {
out <- from_dataframe(df, "sample", "feature", "value")
expect_s4_class(out, "ExpressionSet")
expect_equal(Biobase::sampleNames(out), levels(df$sample))
expect_equal(Biobase::featureNames(out), levels(df$feature))
})
test_that("includes appropriate metadata", {
out <- from_dataframe(df, "sample", "feature", "value")
expect_equal(colnames(Biobase::pData(out)), character())
expect_equal(colnames(Biobase::fData(out)), character())
out <- from_dataframe(df, "sample", "feature", "value", pvars = "pcol")
expect_equal(colnames(Biobase::pData(out)), "pcol")
out <- from_dataframe(df, "sample", "feature", "value",
fvars = c("fcol", "fcol2"))
expect_equal(colnames(Biobase::fData(out)), c("fcol", "fcol2"))
})
test_that("stops if sample/feature ids are not unique", {
df <- rbind(df, df[1, ])
expect_error(from_dataframe(df, "sample", "feature", "value"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/btbv5_radi_fac_only.R
\name{btbv5_radi_fac_only}
\alias{btbv5_radi_fac_only}
\title{btbv5_radi_fac_only}
\usage{
btbv5_radi_fac_only(df, osa_group_p, osa_class_p, osa_specialization_p)
}
\arguments{
\item{osa_specialization_p}{}
}
\value{
}
\description{
btbv5_radi_fac_only
}
| /man/btbv5_radi_fac_only.Rd | no_license | utah-osa/hcctools2 | R | false | true | 356 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/btbv5_radi_fac_only.R
\name{btbv5_radi_fac_only}
\alias{btbv5_radi_fac_only}
\title{btbv5_radi_fac_only}
\usage{
btbv5_radi_fac_only(df, osa_group_p, osa_class_p, osa_specialization_p)
}
\arguments{
\item{osa_specialization_p}{}
}
\value{
}
\description{
btbv5_radi_fac_only
}
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -3.98726823437481e+288, -2.0832188221259e+111, -1.14701541500338e-54, -1.02447057530812e-112, 4.77030395728052e+121, 3.47587764874075e+275, 1.3550214789627e+185))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609866518-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 796 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -3.98726823437481e+288, -2.0832188221259e+111, -1.14701541500338e-54, -1.02447057530812e-112, 4.77030395728052e+121, 3.47587764874075e+275, 1.3550214789627e+185))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
### ===== actuar: An R Package for Actuarial Science =====
###
### Creation of grouped data objects
###
### See Klugman, Panjer & Willmot, Loss Models, Wiley, 1998.
###
### AUTHORS: Vincent Goulet <vincent.goulet@act.ulaval.ca>,
### Mathieu Pigeon, Louis-Philippe Pouliot
grouped.data <- function(..., right = TRUE, row.names = NULL,
                         check.rows = FALSE, check.names = TRUE)
{
    ## Build a grouped-data object (class "grouped.data"): a data frame
    ## whose first column holds formatted group boundaries, e.g. "(0, 10]",
    ## and whose remaining columns hold the group frequencies. 'right'
    ## controls whether intervals are closed on the right. The raw
    ## boundaries are stashed as 'cj' in the object's environment.
    ## Utility function: fixed notation, two significant digits.
    numform <- function(x, w)
        formatC(x, digits = 2, width = w, format = "fg")
    ## The function must be called with at least two arguments. The
    ## first is the vector of group boundaries. The others are vectors
    ## of group frequencies. All arguments will be converted to data
    ## frames.
    x <- list(...)
    xnames <- names(x)                  # preserve names
    y <- as.data.frame(x[-1])           # group frequencies
    x <- as.data.frame(x[[1]])          # group boundaries
    nx <- nrow(x)
    ny <- nrow(y)
    ## There must be exactly one group boundary more than frequencies.
    if (nx - ny != 1)
        stop("invalid number of group boundaries and frequencies")
    ## Replace missing frequencies by zeros.
    ## (Bug fix: the check was applied to the boundaries 'x' instead of
    ## the frequencies 'y', so NA frequencies slipped through untouched.)
    nay <- is.na(y)
    if (any(nay))
    {
        y[nay] <- 0
        warning("missing frequencies replaced by zeros")
    }
    ## Return a data frame with formatted group boundaries in the
    ## first column.
    w <- max(nchar(x[-1, ]))            # longest upper boundary
    xfmt <- paste(if (right) "(" else "[",
                  numform(x[-nx, ], -1), ", ", numform(x[-1, ], w),
                  if (right) "]" else ")",
                  sep = "")
    res <- data.frame(xfmt, y, row.names = row.names, check.rows = check.rows,
                      check.names = check.names)
    names(res) <- c(xnames[1], names(y))
    class(res) <- c("grouped.data", "data.frame")
    environment(res) <- new.env()
    assign("cj", unlist(x, use.names = FALSE), environment(res))
    attr(res, "right") <- right
    res
}
| /actuar/R/grouped.data.R | no_license | ingted/R-Examples | R | false | false | 1,997 | r | ### ===== actuar: An R Package for Actuarial Science =====
###
### Creation of grouped data objects
###
### See Klugman, Panjer & Willmot, Loss Models, Wiley, 1998.
###
### AUTHORS: Vincent Goulet <vincent.goulet@act.ulaval.ca>,
### Mathieu Pigeon, Louis-Philippe Pouliot
grouped.data <- function(..., right = TRUE, row.names = NULL,
                         check.rows = FALSE, check.names = TRUE)
{
    ## Build a grouped-data object (class "grouped.data"): a data frame
    ## whose first column holds formatted group boundaries, e.g. "(0, 10]",
    ## and whose remaining columns hold the group frequencies. 'right'
    ## controls whether intervals are closed on the right. The raw
    ## boundaries are stashed as 'cj' in the object's environment.
    ## Utility function: fixed notation, two significant digits.
    numform <- function(x, w)
        formatC(x, digits = 2, width = w, format = "fg")
    ## The function must be called with at least two arguments. The
    ## first is the vector of group boundaries. The others are vectors
    ## of group frequencies. All arguments will be converted to data
    ## frames.
    x <- list(...)
    xnames <- names(x)                  # preserve names
    y <- as.data.frame(x[-1])           # group frequencies
    x <- as.data.frame(x[[1]])          # group boundaries
    nx <- nrow(x)
    ny <- nrow(y)
    ## There must be exactly one group boundary more than frequencies.
    if (nx - ny != 1)
        stop("invalid number of group boundaries and frequencies")
    ## Replace missing frequencies by zeros.
    ## (Bug fix: the check was applied to the boundaries 'x' instead of
    ## the frequencies 'y', so NA frequencies slipped through untouched.)
    nay <- is.na(y)
    if (any(nay))
    {
        y[nay] <- 0
        warning("missing frequencies replaced by zeros")
    }
    ## Return a data frame with formatted group boundaries in the
    ## first column.
    w <- max(nchar(x[-1, ]))            # longest upper boundary
    xfmt <- paste(if (right) "(" else "[",
                  numform(x[-nx, ], -1), ", ", numform(x[-1, ], w),
                  if (right) "]" else ")",
                  sep = "")
    res <- data.frame(xfmt, y, row.names = row.names, check.rows = check.rows,
                      check.names = check.names)
    names(res) <- c(xnames[1], names(y))
    class(res) <- c("grouped.data", "data.frame")
    environment(res) <- new.env()
    assign("cj", unlist(x, use.names = FALSE), environment(res))
    attr(res, "right") <- right
    res
}
|
\name{power.mean}
\alias{power.mean}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Weighted power mean
}
\description{
Computes the weighted power (generalized) mean of order \code{order} of a
set of non-negative values. Handles the limiting cases of the geometric
mean (\code{order = 0}) and the minimum/maximum
(\code{order = -Inf} / \code{order = Inf}); only values with positive
weight contribute.
}
\usage{
power.mean(values, order = 1, weights = rep(1, length(values)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{values}{
A numeric vector of non-negative values to be averaged.
}
  \item{order}{
The order of the power mean; may be any real number, including
\code{0} (geometric mean) and \code{-Inf}/\code{Inf} (min/max).
}
  \item{weights}{
A numeric vector of non-negative weights, the same length as
\code{values}; defaults to equal weighting.
}
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (values, order = 1, weights = rep(1, length(values)))
{
proportions <- weights/sum(weights)
if (length(values) != length(weights))
stop("The number of values does not equal the number of weights, please check arguments")
if (any(values[!is.nan(values)] < 0))
stop("Check that values (argument) are non-negative.")
if (all(is.nan(proportions)))
return(NaN)
if (order > 0) {
if (is.infinite(order)) {
max(values[weights > 0])
}
else if (isTRUE(all.equal(order, 0))) {
prod(values[weights > 0]^proportions[weights > 0])
}
else {
sum(proportions[weights > 0] * values[weights > 0]^order)^(1/order)
}
}
else {
if (is.infinite(order)) {
min(values[weights > 0])
}
else if (isTRUE(all.equal(order, 0))) {
prod(values[weights > 0]^proportions[weights > 0])
}
else {
sum(proportions[weights > 0] * values[weights > 0]^order)^(1/order)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/power.mean.Rd | permissive | ljallen/RDiversity | R | false | false | 2,467 | rd | \name{power.mean}
\alias{power.mean}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
power.mean(values, order = 1, weights = rep(1, length(values)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{values}{
%% ~~Describe \code{values} here~~
}
\item{order}{
%% ~~Describe \code{order} here~~
}
\item{weights}{
%% ~~Describe \code{weights} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (values, order = 1, weights = rep(1, length(values)))
{
proportions <- weights/sum(weights)
if (length(values) != length(weights))
stop("The number of values does not equal the number of weights, please check arguments")
if (any(values[!is.nan(values)] < 0))
stop("Check that values (argument) are non-negative.")
if (all(is.nan(proportions)))
return(NaN)
if (order > 0) {
if (is.infinite(order)) {
max(values[weights > 0])
}
else if (isTRUE(all.equal(order, 0))) {
prod(values[weights > 0]^proportions[weights > 0])
}
else {
sum(proportions[weights > 0] * values[weights > 0]^order)^(1/order)
}
}
else {
if (is.infinite(order)) {
min(values[weights > 0])
}
else if (isTRUE(all.equal(order, 0))) {
prod(values[weights > 0]^proportions[weights > 0])
}
else {
sum(proportions[weights > 0] * values[weights > 0]^order)^(1/order)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# NOTE: 100 and 1000 are not multiples of 6, so these first two calls emit a
# "data length is not a sub-multiple" warning and recycle the data.
A = matrix(1:100, nrow=6)
B = matrix(1:1000, nrow=6)
# Re-create A with a row count that evenly divides the data length (10 | 100).
A = matrix(1:100, nrow=10)
# Likewise for B (10 | 1000), giving a 10 x 100 matrix.
B = matrix(1:1000, nrow=10)
# Transpose A and B
t(A)
t(B)
# create two short vectors (a and b)
a = c(1:2)
b = c(1:4)
# Elementwise multiplication: the vectors are recycled column-wise over the
# matrices (lengths 2 and 4 divide 100 and 1000, so no warning).
X = a*A
Y = b*B
# Re-assign a and b so their lengths match the number of COLUMNS of the
# corresponding matrix, as required for matrix-vector multiplication below.
a=c(1:10)
b=c(1:100)
# Matrix-vector multiplication (the vector is treated as a column vector).
A %*% a
B %*% b
# Invert a small 2x2 matrix
S=matrix(2:5, nrow=2)
# check det() -- a nonzero determinant means S is invertible
det(S)
# inverse the matrix
solve(S)
# Create a random 5x5 matrix with uniform draws on [0, 50] (no seed is set,
# so results differ between runs).
A=matrix(runif(25, min=0, max = 50), nrow=5)
# run new matrix through det()
det(A)
# inverse
solve(A)
| /Assignment 5.R | no_license | MsOshie/sturdy-octo-telegram | R | false | false | 697 | r | A = matrix(1:100, nrow=6)
B = matrix(1:1000, nrow=6)
#make nrow a multiple of 100
A = matrix(1:100, nrow=10)
#make nrow a multiple of 1000
B = matrix(1:1000, nrow=10)
#Transpose A and B
t(A)
t(B)
#create two vectors (a and b)
a = c(1:2)
b = c(1:4)
#multpily matrices by vectors
X = a*A
Y = b*B
#re-assign the vectors a and b to equal the number of rows of the column for the corresponding matrix
a=c(1:10)
b=c(1:100)
#Multiply the matrix by a matrix
A %*% a
B %*% b
#Inverse a matrix
S=matrix(2:5, nrow=2)
#check det()
det(S)
#inverse the matrix
solve(S)
#create matrix using runif()
A=matrix(runif(25, min=0, max = 50), nrow=5)
#run new matrix through det()
det(A)
#inverse
solve(A)
|
# SIMULATION DATA ANALYSIS # 2
# Create own plot folder
plot_folder <- "~/Dropbox/dissertation/paper_3/plots/"
library(MASS)
library(rstan)
library(cmdstanr)
library(data.table)
# Colorblind-friendly palettes (grey-first and black-first variants).
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
set_cmdstan_path("~/cmdstan/")
# Compile the beta-binomial latent-structure models (full counts and
# interval-censored variants). NOTE(review): the same .stan files are
# recompiled again further below as multi_beta_bin_lsm / int_multi_bb_lsm;
# these two handles appear unused -- confirm.
beta_bin_lsm <- cmdstan_model("stan_scripts/multi_beta_bin_lsm_multidim.stan")
beta_bin_lsm_interval <- cmdstan_model("stan_scripts/interval_multi_bb_lsm_multidim.stan")
set.seed(12345)
# True item parameters: item means, intraclass correlations, and unit loadings,
# randomly ordered across the 6 items.
params <- data.frame(
  means = sample(c(.05, .05, .1, .1, .2, .9), 6),
  iccs = sample(c(.2, .2, .2, .3, .3, .5), 6),
  lambdas = 1)
params
# Beta-binomial precision implied by each ICC: exp(-logit(icc)).
params$precs <- exp(-qlogis(params$iccs))
# Flip the loading sign for the item(s) with mean .9.
params$lambdas[params$means == .9] <- params$lambdas[params$means == .9] * -1
params
n.i <- nrow(params)
# Two-factor loading pattern: items 1-3 load on factor 1, items 4-6 on factor 2.
(Lambda <- matrix(c(params$lambdas[1:3], rep(0, 6), params$lambdas[4:6]), ncol = 2))
# Latent correlation matrix for (factor 1, factor 2, precision factor):
# start at .5 off-diagonal, then set the precision-factor correlations.
(R <- matrix(.5, 3, 3) + .5 * diag(3))
R[3, 1] <- R[1, 3] <- .35
R[3, 2] <- R[2, 3] <- .25
R
set.seed(12345)
# Draw 2000 latent trait vectors with the target covariance exactly
# (empirical = TRUE rescales the sample to match R).
cov(Eta <- mvrnorm(2e3, rep(0, 3), R, empirical = TRUE))
plot(Eta)
# Person-by-item location logits (M1): Lambda %*% eta + logit(item mean).
cov(M1 <- t(apply(Eta, 1, function (x) { Lambda %*% x[1:2] + qlogis(params$means) })))
plogis(colMeans(M1))
# Person-by-item log-precisions (M2): one precision factor shared by all items.
cov(M2 <- t(matrix(rep(1, n.i)) %*% Eta[, 3] + log(params$precs)))
exp(colMeans(M2))
cov(cbind(M1, M2))
set.seed(12345)
# Simulate beta-binomial counts with 7 trials, item by item: draw each
# person's success probability from a Beta distribution centered on the
# model-implied mean with the person/item precision, then draw the count.
# The statement order inside the loop is preserved so the RNG stream (and
# hence the simulated data) is identical to the original under this seed.
N <- matrix(nrow = nrow(M1), ncol = ncol(M1))
for (j in seq_len(ncol(M1))) {      # seq_len() is safe for zero columns
  p <- plogis(M1[, j])              # model-implied mean probability
  prec <- exp(M2[, j])              # beta precision
  p <- rbeta(nrow(N), p * prec, (1 - p) * prec)
  N[, j] <- rbinom(nrow(N), 7, p)
}
rm(j, p, prec)  # drop loop temporaries (the original left `prec` behind)
# Quick descriptive checks on the simulated count matrix.
plot(as.data.frame(N))
cor(as.data.frame(N))
apply(N, 2, median)
apply(N, 2, mean)
apply(N, 2, sd)
# Reshape wide person x item counts to long format (one row per response).
N.df <- as.data.frame(N)
colnames(N.df) <- paste0("n.", 1:ncol(N.df))
N.df <- na.omit(reshape(N.df, direction = "long", 1:ncol(N)))
# Tabulate response frequencies per item ("time" = item id after reshape).
N.df.dt <- as.data.table(N.df)
(N.df.dt <- N.df.dt[, .N, list(time, n)])
# Attach the true item parameters for facet labels.
params$time <- 1:nrow(params)
N.df.dt <- merge(N.df.dt, params)
N.df.dt[, means.t := paste0("mean = ", means)]
N.df.dt[, iccs.t := paste0("ICC = ", iccs)]
# Factor membership: items 1-3 -> factor 1, items 4-6 -> factor 2.
N.df.dt[, factor := factor(ifelse(time < 4, 1, 2))]
N.df.dt
library(scales)
# Item-level response distributions, one facet per item, labelled with the
# true mean and ICC and ordered by item number.
ggplot(N.df.dt, aes(n, N)) +
  geom_bar(col = 1, stat = "identity", position = position_dodge(.5)) +
  facet_wrap(~ reorder(paste0("item ", time, "\n", means.t, "\n", iccs.t), time), nrow = 1) +
  theme_bw() + theme(legend.position = "top") +
  labs(x = "Counts", y = "Frequency") +
  scale_fill_manual(values = cbPalette)
ggsave(paste0(plot_folder, "sim_item_dist_2.pdf"), width = 6.5, height = 3.5)
# Stan data list: long-format counts with person/item ids, 7 binomial trials,
# prior settings for the loadings (lognormal with median .5, scaled so 4 is
# the .99 quantile), and the loading sign pattern derived from Lambda.
dat.list <- list(
  Np = nrow(N), Ni = ncol(N), NpNi = nrow(N.df), N = 7,
  n_1d = N.df$n, p_ids = N.df$id, i_ids = N.df$time, shape_r = 2,
  lambda_median = log(.5), lambda_scale = log(4 / .5) / qnorm(.99),
  Nf = ncol(Lambda), Nl = sum(Lambda != 0),
  Load_Pattern = (Lambda > 0) + -1 * (Lambda < 0))
# Fit the location-ICC beta-binomial latent structure model (3 chains in
# parallel, 750 warmup + 750 sampling iterations, fixed seed).
multi_beta_bin_lsm <- cmdstan_model("stan_scripts/multi_beta_bin_lsm_multidim.stan")
mbb.lsm.fit <- multi_beta_bin_lsm$sample(
  data = dat.list, seed = 12345, iter_warmup = 750,
  iter_sampling = 750, chains = 3, parallel_chains = 3)
mbb.lsm.fit$cmdstan_diagnose()
# Convert the cmdstanr output to an rstan object for printing/summaries.
mbb.lsm.fit <- read_stan_csv(mbb.lsm.fit$output_files())
print(mbb.lsm.fit, c("Eta", "R_chol"), digits_summary = 3, include = FALSE)
rowSums(get_elapsed_time(mbb.lsm.fit))
# Recorded run times from a previous execution:
# chain:1 chain:2 chain:3
# 412.848 390.260 400.484
# Parameter recovery: compare true values against posterior summaries.
params$means
print(mbb.lsm.fit, c("p"), digits_summary = 3)
params$iccs
print(mbb.lsm.fit, c("rho"), digits_summary = 3)
# Indices of the non-zero loadings in the estimated loading matrix.
L.idxs <- paste0(
  "Lambda_mat[", apply(which(Lambda != 0, arr.ind = TRUE), 1, paste0, collapse = ","), "]")
params$lambda
print(mbb.lsm.fit, L.idxs, digits_summary = 3)
R
print(mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
      probs = c(.025, .05, .25, .5, .75, .95, .975))
# Recorded posterior summaries from a previous execution:
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.521 0.003 0.045 0.429 0.445 0.493 0.522 0.553 0.591 0.603 171 1.011
# R[1,3] -0.291 0.006 0.069 -0.428 -0.406 -0.335 -0.291 -0.244 -0.177 -0.155 132 1.015
# R[2,3] -0.244 0.005 0.068 -0.373 -0.352 -0.291 -0.248 -0.198 -0.128 -0.105 177 1.027
# sigma_eta 1.062 0.003 0.066 0.938 0.952 1.018 1.062 1.106 1.172 1.191 649 1.007
table(N.df$q <- cut(dat.list$n_1d, c(-1, 0, 2, 4, 7)))
table(N.df$q.int <- as.integer(N.df$q))
dat.list.int <- dat.list
dat.list.int$n_1d <- N.df$q.int
dat.list.int$N_int <- 4
# dat.list.int$cuts <- c(0, 0, 2, 4, 7)
(dat.list.int$cuts <- matrix(c(0, 0, 1, 2, 3, 4, 5, 7), byrow = TRUE, ncol = 2))
dat.list.int$count_1 <- sum(dat.list.int$n_1d == 1)
dat.list.int$count_max <- sum(dat.list.int$n_1d == 4)
dat.list.int$pos_1 <- which(dat.list.int$n_1d == 1)
dat.list.int$pos_max <- which(dat.list.int$n_1d == 4)
# dat.list.int$shape_r <- 3
# dat.list.int$lambda_scale <- log(3 / .5) / qnorm(.99)
dat.list.int$cuts
table(dat.list$n_1d, dat.list.int$n_1d)
# 101
int_multi_bb_lsm <- cmdstan_model("stan_scripts/interval_multi_bb_lsm_multidim.stan")
i.lsm.mbb.fit <- int_multi_bb_lsm$sample(
data = dat.list.int, seed = 12345, iter_warmup = 750,
iter_sampling = 750, chains = 3, parallel_chains = 3,
init = function () list(
lambda_p = rep(.5, dat.list.int$Ni), p_lgt = rep(0, dat.list.int$Ni),
r_lgt = rep(0, dat.list.int$Ni), sigma_eta = .5))
i.lsm.mbb.fit$cmdstan_diagnose()
i.lsm.mbb.fit <- read_stan_csv(i.lsm.mbb.fit$output_files())
print(i.lsm.mbb.fit, c("Eta_pr"), digits_summary = 3, include = FALSE)
rowSums(get_elapsed_time(i.lsm.mbb.fit))
# chain:1 chain:2 chain:3
# 869.462 842.473 862.813
print(mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
probs = c(.025, .05, .25, .5, .75, .95, .975))
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.521 0.003 0.045 0.429 0.445 0.493 0.522 0.553 0.591 0.603 171 1.011
# R[1,3] -0.291 0.006 0.069 -0.428 -0.406 -0.335 -0.291 -0.244 -0.177 -0.155 132 1.015
# R[2,3] -0.244 0.005 0.068 -0.373 -0.352 -0.291 -0.248 -0.198 -0.128 -0.105 177 1.027
# sigma_eta 1.062 0.003 0.066 0.938 0.952 1.018 1.062 1.106 1.172 1.191 649 1.007
print(i.lsm.mbb.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
probs = c(.025, .05, .25, .5, .75, .95, .975))
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.523 0.002 0.047 0.429 0.445 0.492 0.524 0.556 0.597 0.613 427 1.003
# R[1,3] -0.298 0.004 0.085 -0.477 -0.446 -0.352 -0.297 -0.240 -0.164 -0.138 428 1.010
# R[2,3] -0.218 0.004 0.086 -0.387 -0.360 -0.274 -0.216 -0.161 -0.084 -0.058 369 1.006
# sigma_eta 0.984 0.005 0.093 0.811 0.838 0.922 0.982 1.044 1.138 1.167 372 1.011
params$means
print(i.lsm.mbb.fit, c("p"), digits_summary = 3)
params$iccs
print(i.lsm.mbb.fit, c("rho"), digits_summary = 3)
params$lambda
print(i.lsm.mbb.fit, L.idxs, digits_summary = 3)
lsm.fit.df <- as.data.frame(summary(
mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta", L.idxs,
"p", "rho"), probs = c(.05, .5, .95))$summary)
lsm.fit.df$params <- rownames(lsm.fit.df)
i.lsm.fit.df <- as.data.frame(summary(
i.lsm.mbb.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta", L.idxs,
"p", "rho"), probs = c(.05, .5, .95))$summary)
i.lsm.fit.df$params <- rownames(i.lsm.fit.df)
comb.df <- rbindlist(list(lsm.fit.df, i.lsm.fit.df), idcol = "fit")
comb.df
comb.df[, class := gsub("\\[\\d(,\\d)?\\]", "", params)]
comb.df[, class := ifelse(class == "Lambda_mat", "loading", class)]
comb.df[, class := ifelse(class == "p", "mean", class)]
comb.df[, class := ifelse(class == "rho", "ICC", class)]
comb.df[, class := ifelse(class == "sigma_eta", "load_icc", class)]
comb.df[, item := as.integer(gsub("[a-z]+|[A-Z]+|\\[|\\]|\\_|,\\d", "", params))]
setnames(comb.df, c("50%", "5%", "95%"), c("median", "ll", "ul"))
comb.df
comb.df[, pos := ifelse(
class == "loading", as.integer(item), ifelse(
class == "mean", as.integer(item) + 6, ifelse(
class == "ICC", as.integer(item) + 12, 13)))]
params$item <- params$time
comb.df <- merge(comb.df, params, all.x = TRUE)
comb.df
comb.df[, pop_param := ifelse(
class == "mean", means, ifelse(
class == "loading", abs(lambdas), ifelse(
class == "ICC", iccs, ifelse(
class == "load_ICC", 1, NA))))]
comb.df[, fit.t := factor(
fit, labels = c("Location-ICC", "Location-ICC (binned data)"))]
comb.df
comb.df[pop_param == .9 & class == "mean", median := 1 - median]
comb.df[pop_param == .9 & class == "mean", ll := 1 - ll]
comb.df[pop_param == .9 & class == "mean", ul := 1 - ul]
comb.df[pop_param == .9 & class == "mean", pop_param := 1 - pop_param]
comb.df
library(ggplot2)
library(ggforce)
library(scales)
# Fits: 1 = Location-ICC, 2 = Location-ICC (binned data)
# More extreme loadings from location-only fit
# Smaller ICCs (in high ICC models) from location-only fit
plt.1 <- ggplot(comb.df[class %in% c("loading", "mean", "ICC")],
aes(item, abs(median), fill = fit.t)) +
geom_segment(aes(x = item - .25, xend = item + .25, y = pop_param, yend = pop_param)) +
geom_linerange(aes(ymin = abs(ll), ymax = abs(ul), col = fit.t),
position = position_dodge(.5)) +
facet_wrap(~ reorder(class, pos), scales = "free") +
theme_bw() +
scale_x_continuous(labels = 1:7, breaks = 1:7) +
scale_color_manual(values = cbPalette) +
theme(legend.position = "top", strip.background = element_blank(),
panel.border = element_blank(), panel.grid.minor = element_blank(),
axis.ticks = element_blank(), panel.grid.major.x = element_blank()) +
labs(x = "Item ID", y = "90% quantile intervals", fill = "", col = "")
cbrdf <- comb.df[class == "R"]
cbrdf$class <- "|R|"
cbrdf$pop_param <- c(rep(c(.5, .35), 2), rep(.25, 2))
cbrdf$item <- c(rep(c(1, 2), 2), rep(3, 2))
cbrdf$item.f <- c(rep(c("F1-F2", "F1-F3"), 2), rep("F2-F3", 2))
plt.2 <- ggplot(cbrdf, aes(item, abs(median), fill = fit.t)) +
geom_linerange(aes(ymin = abs(ll), ymax = abs(ul), col = fit.t),
position = position_dodge(.5)) +
geom_segment(aes(x = item - .25, xend = item + .25, y = pop_param, yend = pop_param)) +
facet_wrap(~ reorder(class, pos), scales = "free") +
scale_x_continuous(labels = c("F1-F2", "F1-F3", "F2-F3"), breaks = 1:3) +
scale_y_continuous(breaks = c(.1, .25, .35, .5, .6)) +
theme_bw() + guides(col = FALSE) +
scale_color_manual(values = cbPalette) +
theme(legend.position = "top", strip.background = element_blank(),
panel.border = element_blank(), panel.grid.minor = element_blank(),
axis.ticks = element_blank(), panel.grid.major.x = element_blank(),
axis.title.y = element_blank()) +
labs(x = "Correlation", y = "90% quantile intervals", fill = "", col = "")
library(patchwork)
plt.1 + plt.2 + plot_layout(widths = c(3, 1))
ggsave("plots/sim_2_res.pdf", height = 3.5, width = 6.5)
ggsave(paste0(plot_folder, "sim_2_res.pdf"), height = 3.5, width = 6.5)
| /paper_3/sim_study_2.R | no_license | stonegold546/dissertation_papers | R | false | false | 11,042 | r | # SIMULATION DATA ANALYSIS # 2
# Create own plot folder
plot_folder <- "~/Dropbox/dissertation/paper_3/plots/"
library(MASS)
library(rstan)
library(cmdstanr)
library(data.table)
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
set_cmdstan_path("~/cmdstan/")
beta_bin_lsm <- cmdstan_model("stan_scripts/multi_beta_bin_lsm_multidim.stan")
beta_bin_lsm_interval <- cmdstan_model("stan_scripts/interval_multi_bb_lsm_multidim.stan")
set.seed(12345)
params <- data.frame(
means = sample(c(.05, .05, .1, .1, .2, .9), 6),
iccs = sample(c(.2, .2, .2, .3, .3, .5), 6),
lambdas = 1)
params
params$precs <- exp(-qlogis(params$iccs))
params$lambdas[params$means == .9] <- params$lambdas[params$means == .9] * -1
params
n.i <- nrow(params)
(Lambda <- matrix(c(params$lambdas[1:3], rep(0, 6), params$lambdas[4:6]), ncol = 2))
(R <- matrix(.5, 3, 3) + .5 * diag(3))
R[3, 1] <- R[1, 3] <- .35
R[3, 2] <- R[2, 3] <- .25
R
set.seed(12345)
cov(Eta <- mvrnorm(2e3, rep(0, 3), R, empirical = TRUE))
plot(Eta)
cov(M1 <- t(apply(Eta, 1, function (x) { Lambda %*% x[1:2] + qlogis(params$means) })))
plogis(colMeans(M1))
cov(M2 <- t(matrix(rep(1, n.i)) %*% Eta[, 3] + log(params$precs)))
exp(colMeans(M2))
cov(cbind(M1, M2))
set.seed(12345)
N <- matrix(nrow = nrow(M1), ncol = ncol(M1))
for (j in 1:ncol(M1)) {
p <- plogis(M1[, j])
prec <- exp(M2[, j])
p <- rbeta(nrow(N), p * prec, (1 - p) * prec)
N[, j] <- rbinom(nrow(N), 7, p)
}
rm(j, p)
plot(as.data.frame(N))
cor(as.data.frame(N))
apply(N, 2, median)
apply(N, 2, mean)
apply(N, 2, sd)
N.df <- as.data.frame(N)
colnames(N.df) <- paste0("n.", 1:ncol(N.df))
N.df <- na.omit(reshape(N.df, direction = "long", 1:ncol(N)))
N.df.dt <- as.data.table(N.df)
(N.df.dt <- N.df.dt[, .N, list(time, n)])
params$time <- 1:nrow(params)
N.df.dt <- merge(N.df.dt, params)
N.df.dt[, means.t := paste0("mean = ", means)]
N.df.dt[, iccs.t := paste0("ICC = ", iccs)]
N.df.dt[, factor := factor(ifelse(time < 4, 1, 2))]
N.df.dt
library(scales)
ggplot(N.df.dt, aes(n, N)) +
geom_bar(col = 1, stat = "identity", position = position_dodge(.5)) +
facet_wrap(~ reorder(paste0("item ", time, "\n", means.t, "\n", iccs.t), time), nrow = 1) +
theme_bw() + theme(legend.position = "top") +
labs(x = "Counts", y = "Frequency") +
scale_fill_manual(values = cbPalette)
ggsave(paste0(plot_folder, "sim_item_dist_2.pdf"), width = 6.5, height = 3.5)
dat.list <- list(
Np = nrow(N), Ni = ncol(N), NpNi = nrow(N.df), N = 7,
n_1d = N.df$n, p_ids = N.df$id, i_ids = N.df$time, shape_r = 2,
lambda_median = log(.5), lambda_scale = log(4 / .5) / qnorm(.99),
Nf = ncol(Lambda), Nl = sum(Lambda != 0),
Load_Pattern = (Lambda > 0) + -1 * (Lambda < 0))
multi_beta_bin_lsm <- cmdstan_model("stan_scripts/multi_beta_bin_lsm_multidim.stan")
mbb.lsm.fit <- multi_beta_bin_lsm$sample(
data = dat.list, seed = 12345, iter_warmup = 750,
iter_sampling = 750, chains = 3, parallel_chains = 3)
mbb.lsm.fit$cmdstan_diagnose()
mbb.lsm.fit <- read_stan_csv(mbb.lsm.fit$output_files())
print(mbb.lsm.fit, c("Eta", "R_chol"), digits_summary = 3, include = FALSE)
rowSums(get_elapsed_time(mbb.lsm.fit))
# chain:1 chain:2 chain:3
# 412.848 390.260 400.484
params$means
print(mbb.lsm.fit, c("p"), digits_summary = 3)
params$iccs
print(mbb.lsm.fit, c("rho"), digits_summary = 3)
L.idxs <- paste0(
"Lambda_mat[", apply(which(Lambda != 0, arr.ind = TRUE), 1, paste0, collapse = ","), "]")
params$lambda
print(mbb.lsm.fit, L.idxs, digits_summary = 3)
R
print(mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
probs = c(.025, .05, .25, .5, .75, .95, .975))
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.521 0.003 0.045 0.429 0.445 0.493 0.522 0.553 0.591 0.603 171 1.011
# R[1,3] -0.291 0.006 0.069 -0.428 -0.406 -0.335 -0.291 -0.244 -0.177 -0.155 132 1.015
# R[2,3] -0.244 0.005 0.068 -0.373 -0.352 -0.291 -0.248 -0.198 -0.128 -0.105 177 1.027
# sigma_eta 1.062 0.003 0.066 0.938 0.952 1.018 1.062 1.106 1.172 1.191 649 1.007
table(N.df$q <- cut(dat.list$n_1d, c(-1, 0, 2, 4, 7)))
table(N.df$q.int <- as.integer(N.df$q))
dat.list.int <- dat.list
dat.list.int$n_1d <- N.df$q.int
dat.list.int$N_int <- 4
# dat.list.int$cuts <- c(0, 0, 2, 4, 7)
(dat.list.int$cuts <- matrix(c(0, 0, 1, 2, 3, 4, 5, 7), byrow = TRUE, ncol = 2))
dat.list.int$count_1 <- sum(dat.list.int$n_1d == 1)
dat.list.int$count_max <- sum(dat.list.int$n_1d == 4)
dat.list.int$pos_1 <- which(dat.list.int$n_1d == 1)
dat.list.int$pos_max <- which(dat.list.int$n_1d == 4)
# dat.list.int$shape_r <- 3
# dat.list.int$lambda_scale <- log(3 / .5) / qnorm(.99)
dat.list.int$cuts
table(dat.list$n_1d, dat.list.int$n_1d)
# 101
int_multi_bb_lsm <- cmdstan_model("stan_scripts/interval_multi_bb_lsm_multidim.stan")
i.lsm.mbb.fit <- int_multi_bb_lsm$sample(
data = dat.list.int, seed = 12345, iter_warmup = 750,
iter_sampling = 750, chains = 3, parallel_chains = 3,
init = function () list(
lambda_p = rep(.5, dat.list.int$Ni), p_lgt = rep(0, dat.list.int$Ni),
r_lgt = rep(0, dat.list.int$Ni), sigma_eta = .5))
i.lsm.mbb.fit$cmdstan_diagnose()
i.lsm.mbb.fit <- read_stan_csv(i.lsm.mbb.fit$output_files())
print(i.lsm.mbb.fit, c("Eta_pr"), digits_summary = 3, include = FALSE)
rowSums(get_elapsed_time(i.lsm.mbb.fit))
# chain:1 chain:2 chain:3
# 869.462 842.473 862.813
print(mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
probs = c(.025, .05, .25, .5, .75, .95, .975))
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.521 0.003 0.045 0.429 0.445 0.493 0.522 0.553 0.591 0.603 171 1.011
# R[1,3] -0.291 0.006 0.069 -0.428 -0.406 -0.335 -0.291 -0.244 -0.177 -0.155 132 1.015
# R[2,3] -0.244 0.005 0.068 -0.373 -0.352 -0.291 -0.248 -0.198 -0.128 -0.105 177 1.027
# sigma_eta 1.062 0.003 0.066 0.938 0.952 1.018 1.062 1.106 1.172 1.191 649 1.007
print(i.lsm.mbb.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta"), digits_summary = 3,
probs = c(.025, .05, .25, .5, .75, .95, .975))
# mean se_mean sd 2.5% 5% 25% 50% 75% 95% 97.5% n_eff Rhat
# R[1,2] 0.523 0.002 0.047 0.429 0.445 0.492 0.524 0.556 0.597 0.613 427 1.003
# R[1,3] -0.298 0.004 0.085 -0.477 -0.446 -0.352 -0.297 -0.240 -0.164 -0.138 428 1.010
# R[2,3] -0.218 0.004 0.086 -0.387 -0.360 -0.274 -0.216 -0.161 -0.084 -0.058 369 1.006
# sigma_eta 0.984 0.005 0.093 0.811 0.838 0.922 0.982 1.044 1.138 1.167 372 1.011
params$means
print(i.lsm.mbb.fit, c("p"), digits_summary = 3)
params$iccs
print(i.lsm.mbb.fit, c("rho"), digits_summary = 3)
params$lambda
print(i.lsm.mbb.fit, L.idxs, digits_summary = 3)
lsm.fit.df <- as.data.frame(summary(
mbb.lsm.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta", L.idxs,
"p", "rho"), probs = c(.05, .5, .95))$summary)
lsm.fit.df$params <- rownames(lsm.fit.df)
i.lsm.fit.df <- as.data.frame(summary(
i.lsm.mbb.fit, c("R[1,2]", "R[1,3]", "R[2,3]", "sigma_eta", L.idxs,
"p", "rho"), probs = c(.05, .5, .95))$summary)
i.lsm.fit.df$params <- rownames(i.lsm.fit.df)
comb.df <- rbindlist(list(lsm.fit.df, i.lsm.fit.df), idcol = "fit")
comb.df
comb.df[, class := gsub("\\[\\d(,\\d)?\\]", "", params)]
comb.df[, class := ifelse(class == "Lambda_mat", "loading", class)]
comb.df[, class := ifelse(class == "p", "mean", class)]
comb.df[, class := ifelse(class == "rho", "ICC", class)]
comb.df[, class := ifelse(class == "sigma_eta", "load_icc", class)]
comb.df[, item := as.integer(gsub("[a-z]+|[A-Z]+|\\[|\\]|\\_|,\\d", "", params))]
setnames(comb.df, c("50%", "5%", "95%"), c("median", "ll", "ul"))
comb.df
comb.df[, pos := ifelse(
class == "loading", as.integer(item), ifelse(
class == "mean", as.integer(item) + 6, ifelse(
class == "ICC", as.integer(item) + 12, 13)))]
params$item <- params$time
comb.df <- merge(comb.df, params, all.x = TRUE)
comb.df
comb.df[, pop_param := ifelse(
class == "mean", means, ifelse(
class == "loading", abs(lambdas), ifelse(
class == "ICC", iccs, ifelse(
class == "load_ICC", 1, NA))))]
comb.df[, fit.t := factor(
fit, labels = c("Location-ICC", "Location-ICC (binned data)"))]
comb.df
comb.df[pop_param == .9 & class == "mean", median := 1 - median]
comb.df[pop_param == .9 & class == "mean", ll := 1 - ll]
comb.df[pop_param == .9 & class == "mean", ul := 1 - ul]
comb.df[pop_param == .9 & class == "mean", pop_param := 1 - pop_param]
comb.df
library(ggplot2)
library(ggforce)
library(scales)
# Fits: 1 = Location-ICC, 2 = Location-ICC (binned data)
# More extreme loadings from location-only fit
# Smaller ICCs (in high ICC models) from location-only fit
plt.1 <- ggplot(comb.df[class %in% c("loading", "mean", "ICC")],
aes(item, abs(median), fill = fit.t)) +
geom_segment(aes(x = item - .25, xend = item + .25, y = pop_param, yend = pop_param)) +
geom_linerange(aes(ymin = abs(ll), ymax = abs(ul), col = fit.t),
position = position_dodge(.5)) +
facet_wrap(~ reorder(class, pos), scales = "free") +
theme_bw() +
scale_x_continuous(labels = 1:7, breaks = 1:7) +
scale_color_manual(values = cbPalette) +
theme(legend.position = "top", strip.background = element_blank(),
panel.border = element_blank(), panel.grid.minor = element_blank(),
axis.ticks = element_blank(), panel.grid.major.x = element_blank()) +
labs(x = "Item ID", y = "90% quantile intervals", fill = "", col = "")
cbrdf <- comb.df[class == "R"]
cbrdf$class <- "|R|"
cbrdf$pop_param <- c(rep(c(.5, .35), 2), rep(.25, 2))
cbrdf$item <- c(rep(c(1, 2), 2), rep(3, 2))
cbrdf$item.f <- c(rep(c("F1-F2", "F1-F3"), 2), rep("F2-F3", 2))
plt.2 <- ggplot(cbrdf, aes(item, abs(median), fill = fit.t)) +
geom_linerange(aes(ymin = abs(ll), ymax = abs(ul), col = fit.t),
position = position_dodge(.5)) +
geom_segment(aes(x = item - .25, xend = item + .25, y = pop_param, yend = pop_param)) +
facet_wrap(~ reorder(class, pos), scales = "free") +
scale_x_continuous(labels = c("F1-F2", "F1-F3", "F2-F3"), breaks = 1:3) +
scale_y_continuous(breaks = c(.1, .25, .35, .5, .6)) +
theme_bw() + guides(col = FALSE) +
scale_color_manual(values = cbPalette) +
theme(legend.position = "top", strip.background = element_blank(),
panel.border = element_blank(), panel.grid.minor = element_blank(),
axis.ticks = element_blank(), panel.grid.major.x = element_blank(),
axis.title.y = element_blank()) +
labs(x = "Correlation", y = "90% quantile intervals", fill = "", col = "")
library(patchwork)
plt.1 + plt.2 + plot_layout(widths = c(3, 1))
ggsave("plots/sim_2_res.pdf", height = 3.5, width = 6.5)
ggsave(paste0(plot_folder, "sim_2_res.pdf"), height = 3.5, width = 6.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_list_contact_flows}
\alias{connect_list_contact_flows}
\title{Provides information about the contact flows for the specified Amazon
Connect instance}
\usage{
connect_list_contact_flows(InstanceId, ContactFlowTypes, NextToken,
MaxResults)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{ContactFlowTypes}{The type of contact flow.}
\item{NextToken}{The token for the next set of results. Use the value returned in the
previous response in the next request to retrieve the next set of
results.}
\item{MaxResults}{The maximimum number of results to return per page.}
}
\value{
A list with the following syntax:\preformatted{list(
ContactFlowSummaryList = list(
list(
Id = "string",
Arn = "string",
Name = "string",
ContactFlowType = "CONTACT_FLOW"|"CUSTOMER_QUEUE"|"CUSTOMER_HOLD"|"CUSTOMER_WHISPER"|"AGENT_HOLD"|"AGENT_WHISPER"|"OUTBOUND_WHISPER"|"AGENT_TRANSFER"|"QUEUE_TRANSFER"
)
),
NextToken = "string"
)
}
}
\description{
Provides information about the contact flows for the specified Amazon
Connect instance.
You can also create and update contact flows using the \href{https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html}{Amazon Connect Flow language}.
For more information about contact flows, see \href{https://docs.aws.amazon.com/connect/latest/adminguide/concepts-contact-flows.html}{Contact Flows}
in the \emph{Amazon Connect Administrator Guide}.
}
\section{Request syntax}{
\preformatted{svc$list_contact_flows(
InstanceId = "string",
ContactFlowTypes = list(
"CONTACT_FLOW"|"CUSTOMER_QUEUE"|"CUSTOMER_HOLD"|"CUSTOMER_WHISPER"|"AGENT_HOLD"|"AGENT_WHISPER"|"OUTBOUND_WHISPER"|"AGENT_TRANSFER"|"QUEUE_TRANSFER"
),
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
| /cran/paws.customer.engagement/man/connect_list_contact_flows.Rd | permissive | TWarczak/paws | R | false | true | 1,931 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_list_contact_flows}
\alias{connect_list_contact_flows}
\title{Provides information about the contact flows for the specified Amazon
Connect instance}
\usage{
connect_list_contact_flows(InstanceId, ContactFlowTypes, NextToken,
MaxResults)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{ContactFlowTypes}{The type of contact flow.}
\item{NextToken}{The token for the next set of results. Use the value returned in the
previous response in the next request to retrieve the next set of
results.}
\item{MaxResults}{The maximimum number of results to return per page.}
}
\value{
A list with the following syntax:\preformatted{list(
ContactFlowSummaryList = list(
list(
Id = "string",
Arn = "string",
Name = "string",
ContactFlowType = "CONTACT_FLOW"|"CUSTOMER_QUEUE"|"CUSTOMER_HOLD"|"CUSTOMER_WHISPER"|"AGENT_HOLD"|"AGENT_WHISPER"|"OUTBOUND_WHISPER"|"AGENT_TRANSFER"|"QUEUE_TRANSFER"
)
),
NextToken = "string"
)
}
}
\description{
Provides information about the contact flows for the specified Amazon
Connect instance.
You can also create and update contact flows using the \href{https://docs.aws.amazon.com/connect/latest/adminguide/flow-language.html}{Amazon Connect Flow language}.
For more information about contact flows, see \href{https://docs.aws.amazon.com/connect/latest/adminguide/concepts-contact-flows.html}{Contact Flows}
in the \emph{Amazon Connect Administrator Guide}.
}
\section{Request syntax}{
\preformatted{svc$list_contact_flows(
InstanceId = "string",
ContactFlowTypes = list(
"CONTACT_FLOW"|"CUSTOMER_QUEUE"|"CUSTOMER_HOLD"|"CUSTOMER_WHISPER"|"AGENT_HOLD"|"AGENT_WHISPER"|"OUTBOUND_WHISPER"|"AGENT_TRANSFER"|"QUEUE_TRANSFER"
),
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
testlist <- list(Beta = 0, CVLinf = 0, FM = 5.87938118551083e-322, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 7.13706800764285e-79, SL95 = 5.42289851365943e+188, nage = 0L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615830351-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 381 | r | testlist <- list(Beta = 0, CVLinf = 0, FM = 5.87938118551083e-322, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 7.13706800764285e-79, SL95 = 5.42289851365943e+188, nage = 0L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
###### data setting ###################################################
setwd("D:/MyGitHub/Computer_Intensive_Statistics_in_Ecology/data")
rawdata <- read.table("modeldata.txt")
x.test <- as.matrix(cbind(1, rawdata[, 1:2]))
y.valid <- as.matrix(rawdata[, 3:5])
n <- dim(rawdata)[1] # number of data point
n.run <- 1000 # number of learning step
rate <- 0.001 # learning rate
# tanh(x) is the activation function
dtanh <- function(x) {1 - tanh(x)^2} # derivative of tanh(x)
# There are 5 hidden neurons, and we suppose that each hidden neuron
# contains 2 units. Remember to add bias term.
#############################################################
# Start from here.
# hidden unit value
S <- matrix(0, nrow=n, ncol=2) # hidden neuron 1
H <- matrix(0, nrow=n, ncol=2) # hidden neuron 2
P <- matrix(0, nrow=n, ncol=2) # hidden neuron 3
Q <- matrix(0, nrow=n, ncol=2) # hidden neuron 4
R <- matrix(0, nrow=n, ncol=2) # hidden neuron 5
# coefficient 1
a1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 1, unit 1
b1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 2, unit 1
c1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 3, unit 1
d1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 4, unit 1
e1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 5, unit 1
# coefficient 2
a2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 1, unit 2
b2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 2, unit 2
c2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 3, unit 2
d2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 4, unit 2
e2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 5, unit 2
# final output weights
f1 <- matrix(0, nrow=n.run, ncol=2+1)
f2 <- matrix(0, nrow=n.run, ncol=2+1)
f3 <- matrix(0, nrow=n.run, ncol=2+1)
# final output
y.result <- matrix(0, nrow=n.run, ncol=3)
# mse
mse = matrix(0, nrow=n.run, ncol=3)
# set initial values
a1[1, ] = runif(3, -0.5, 0.5)
b1[1, ] = runif(3, -0.5, 0.5)
c1[1, ] = runif(3, -0.5, 0.5)
d1[1, ] = runif(3, -0.5, 0.5)
e1[1, ] = runif(3, -0.5, 0.5)
a2[1, ] = runif(3, -0.5, 0.5)
b2[1, ] = runif(3, -0.5, 0.5)
c2[1, ] = runif(3, -0.5, 0.5)
d2[1, ] = runif(3, -0.5, 0.5)
e2[1, ] = runif(3, -0.5, 0.5)
f1[1, ] = runif(3, -0.5, 0.5)
f2[1, ] = runif(3, -0.5, 0.5)
f3[1, ] = runif(3, -0.5, 0.5)
# Now we compute MSE for step 1
S = tanh(x.test%*%cbind(a1[1, ], a2[1, ]))
H = tanh(cbind(1, S)%*%cbind(b1[1, ], b2[1, ]))
P = tanh(cbind(1, H)%*%cbind(c1[1, ], c2[1, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[1, ], d2[1, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[1, ], e2[1, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[1, ], f2[1, ], f3[1, ]))
mse[1, ] = diag(t(y.valid - y.result)%*%(y.valid - y.result))/n
# In backward pass process, we first compute the next time values mannually.
delta1 <- matrix(0, nrow=n, ncol=3)
delta2 <- matrix(0, nrow=n, ncol=2)
delta3 <- matrix(0, nrow=n, ncol=2)
delta4 <- matrix(0, nrow=n, ncol=2)
delta5 <- matrix(0, nrow=n, ncol=2)
delta6 <- matrix(0, nrow=n, ncol=2)
delta1 = dtanh(cbind(1, R)%*%cbind(f1[1, ], f2[1, ], f3[1, ]))*(y.valid - y.result)
delta2 = dtanh(cbind(1, Q)%*%cbind(e1[1, ], e2[1, ]))*
(delta1%*%t(cbind(f1[1, ], f2[1, ], f3[1, ])[-1, ]))
delta3 = dtanh(cbind(1, P)%*%cbind(d1[1, ], d2[1, ]))*
(delta2%*%t(cbind(e1[1, ], e2[1, ])[-1, ]))
delta4 = dtanh(cbind(1, H)%*%cbind(c1[1, ], c2[1, ]))*
(delta3%*%t(cbind(d1[1, ], d2[1, ])[-1, ]))
delta5 = dtanh(cbind(1, S)%*%cbind(b1[1, ], b2[1, ]))*
(delta4%*%t(cbind(c1[1, ], c2[1, ])[-1, ]))
delta6 = dtanh(x.test%*%cbind(a1[1, ], a2[1, ]))*
(delta5%*%t(cbind(b1[1, ], b2[1, ])[-1, ]))
f1[2, ] = rate*delta1[, 1]%*%cbind(1, R)/n + f1[1, ]
f2[2, ] = rate*delta1[, 2]%*%cbind(1, R)/n + f2[1, ]
f3[2, ] = rate*delta1[, 3]%*%cbind(1, R)/n + f3[1, ]
e1[2, ] = rate*delta2[, 1]%*%cbind(1, Q)/n + e1[1, ]
d1[2, ] = rate*delta3[, 1]%*%cbind(1, P)/n + d1[1, ]
c1[2, ] = rate*delta4[, 1]%*%cbind(1, H)/n + c1[1, ]
b1[2, ] = rate*delta5[, 1]%*%cbind(1, S)/n + b1[1, ]
a1[2, ] = rate*delta6[, 1]%*%x.test/n + a1[1, ]
e2[2, ] = rate*delta2[, 2]%*%cbind(1, Q)/n + e2[1, ]
d2[2, ] = rate*delta3[, 2]%*%cbind(1, P)/n + d2[1, ]
c2[2, ] = rate*delta4[, 2]%*%cbind(1, H)/n + c2[1, ]
b2[2, ] = rate*delta5[, 2]%*%cbind(1, S)/n + b2[1, ]
a2[2, ] = rate*delta6[, 2]%*%x.test/n + a2[1, ]
# Thus, we have new coefficients now.
# Run the loop for the following steps.
for (i in 2:(n.run-1)) {
S = tanh(x.test%*%cbind(a1[i, ], a2[i, ]))
H = tanh(cbind(1, S)%*%cbind(b1[i, ], b2[i, ]))
P = tanh(cbind(1, H)%*%cbind(c1[i, ], c2[i, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[i, ], d2[i, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[i, ], e2[i, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[i, ], f2[i, ], f3[i, ]))
mse[i, ] = diag(t(y.valid - y.result)%*%(y.valid - y.result))/n
delta1 = dtanh(cbind(1, R)%*%cbind(f1[i, ], f2[i, ], f3[i, ]))*(y.valid - y.result)
delta2 = dtanh(cbind(1, Q)%*%cbind(e1[i, ], e2[i, ]))*
(delta1%*%t(cbind(f1[i, ], f2[i, ], f3[i, ])[-1, ]))
delta3 = dtanh(cbind(1, P)%*%cbind(d1[i, ], d2[i, ]))*
(delta2%*%t(cbind(e1[i, ], e2[i, ])[-1, ]))
delta4 = dtanh(cbind(1, H)%*%cbind(c1[i, ], c2[i, ]))*
(delta3%*%t(cbind(d1[i, ], d2[i, ])[-1, ]))
delta5 = dtanh(cbind(1, S)%*%cbind(b1[i, ], b2[i, ]))*
(delta4%*%t(cbind(c1[i, ], c2[i, ])[-1, ]))
delta6 = dtanh(x.test%*%cbind(a1[i, ], a2[i, ]))*
(delta5%*%t(cbind(b1[i, ], b2[i, ])[-1, ]))
f1[(i+1), ] = rate*delta1[, 1]%*%cbind(1, R)/n + f1[i, ]
f2[(i+1), ] = rate*delta1[, 2]%*%cbind(1, R)/n + f2[i, ]
f3[(i+1), ] = rate*delta1[, 3]%*%cbind(1, R)/n + f3[i, ]
e1[(i+1), ] = rate*delta2[, 1]%*%cbind(1, Q)/n + e1[i, ]
d1[(i+1), ] = rate*delta3[, 1]%*%cbind(1, P)/n + d1[i, ]
c1[(i+1), ] = rate*delta4[, 1]%*%cbind(1, H)/n + c1[i, ]
b1[(i+1), ] = rate*delta5[, 1]%*%cbind(1, S)/n + b1[i, ]
a1[(i+1), ] = rate*delta6[, 1]%*%x.test/n + a1[i, ]
e2[(i+1), ] = rate*delta2[, 2]%*%cbind(1, Q)/n + e2[i, ]
d2[(i+1), ] = rate*delta3[, 2]%*%cbind(1, P)/n + d2[i, ]
c2[(i+1), ] = rate*delta4[, 2]%*%cbind(1, H)/n + c2[i, ]
b2[(i+1), ] = rate*delta5[, 2]%*%cbind(1, S)/n + b2[i, ]
a2[(i+1), ] = rate*delta6[, 2]%*%x.test/n + a2[i, ]
}
# Remarkably, it is very very fast XDDDD.
y.result # seems not resonable
plot(mse[, 1]) # MSE declines slowly
# try different rate, and run all of the things above
rate = 0.5
#-----------------------------------------------
# After run all of the things above....
y.result # seems resonable now
plot(mse[, 1])
plot(mse[, 2])
plot(mse[, 3])
plot(apply(mse, 1, sum)[-n.run], type='l', xlab='Step', ylab='MSE')
# find the minimum MSE
min.mse <- which.min(apply(mse, 1, sum)[-n.run])
# compute the final codings with minimal MSE
S = tanh(x.test%*%cbind(a1[min.mse, ], a2[min.mse, ]))
H = tanh(cbind(1, S)%*%cbind(b1[min.mse, ], b2[min.mse, ]))
P = tanh(cbind(1, H)%*%cbind(c1[min.mse, ], c2[min.mse, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[min.mse, ], d2[min.mse, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[min.mse, ], e2[min.mse, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[min.mse, ], f2[min.mse, ], f3[min.mse, ]))
# coefficients with minimal MSE
a1[min.mse, ]
b1[min.mse, ]
c1[min.mse, ]
d1[min.mse, ]
e1[min.mse, ]
a2[min.mse, ]
b2[min.mse, ]
c2[min.mse, ]
d2[min.mse, ]
e2[min.mse, ]
f1[min.mse, ]
f2[min.mse, ]
f3[min.mse, ]
# plot the original data, and compare it with the output data
y1 = ifelse(rawdata[, 3]==1, 1, 0)
y2 = ifelse(rawdata[, 4]==1, 2, 0)
y3 = ifelse(rawdata[, 5]==1, 3, 0)
y <- y1 + y2 + y3
plot(x.test[, -1], col=y, pch=19, xlab='', ylab='', main="Original data")
y1 = ifelse(floor(y.result)[, 1]==0, 1, 0)
y2 = ifelse(floor(y.result)[, 2]==0, 2, 0)
y3 = ifelse(floor(y.result)[, 3]==0, 3, 0)
y <- y1 + y2 + y3
plot(x.test[, -1], col=y, pch=19, xlab='', ylab='', main="Output data")
# We see that some points are classified into wrong classes, especially the upper-right corner.
# Try longer steps.
n.run = 5000
rate = 0.5
| /homework17_Multi-layer perceptron.r | no_license | snakepowerpoint/Computer_Intensive_Statistics_in_Ecology | R | false | false | 7,964 | r | ###### data setting ###################################################
setwd("D:/MyGitHub/Computer_Intensive_Statistics_in_Ecology/data")
rawdata <- read.table("modeldata.txt")
x.test <- as.matrix(cbind(1, rawdata[, 1:2]))
y.valid <- as.matrix(rawdata[, 3:5])
n <- dim(rawdata)[1] # number of data point
n.run <- 1000 # number of learning step
rate <- 0.001 # learning rate
# tanh(x) is the activation function
dtanh <- function(x) {1 - tanh(x)^2} # derivative of tanh(x)
# There are 5 hidden neurons, and we suppose that each hidden neuron
# contains 2 units. Remember to add bias term.
#############################################################
# Start from here.
# hidden unit value
S <- matrix(0, nrow=n, ncol=2) # hidden neuron 1
H <- matrix(0, nrow=n, ncol=2) # hidden neuron 2
P <- matrix(0, nrow=n, ncol=2) # hidden neuron 3
Q <- matrix(0, nrow=n, ncol=2) # hidden neuron 4
R <- matrix(0, nrow=n, ncol=2) # hidden neuron 5
# coefficient 1
a1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 1, unit 1
b1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 2, unit 1
c1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 3, unit 1
d1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 4, unit 1
e1 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 5, unit 1
# coefficient 2
a2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 1, unit 2
b2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 2, unit 2
c2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 3, unit 2
d2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 4, unit 2
e2 <- matrix(0, nrow=n.run, ncol=2+1) # hidden neuron 5, unit 2
# final output weights
f1 <- matrix(0, nrow=n.run, ncol=2+1)
f2 <- matrix(0, nrow=n.run, ncol=2+1)
f3 <- matrix(0, nrow=n.run, ncol=2+1)
# final output
y.result <- matrix(0, nrow=n.run, ncol=3)
# mse
mse = matrix(0, nrow=n.run, ncol=3)
# set initial values
a1[1, ] = runif(3, -0.5, 0.5)
b1[1, ] = runif(3, -0.5, 0.5)
c1[1, ] = runif(3, -0.5, 0.5)
d1[1, ] = runif(3, -0.5, 0.5)
e1[1, ] = runif(3, -0.5, 0.5)
a2[1, ] = runif(3, -0.5, 0.5)
b2[1, ] = runif(3, -0.5, 0.5)
c2[1, ] = runif(3, -0.5, 0.5)
d2[1, ] = runif(3, -0.5, 0.5)
e2[1, ] = runif(3, -0.5, 0.5)
f1[1, ] = runif(3, -0.5, 0.5)
f2[1, ] = runif(3, -0.5, 0.5)
f3[1, ] = runif(3, -0.5, 0.5)
# Now we compute MSE for step 1
S = tanh(x.test%*%cbind(a1[1, ], a2[1, ]))
H = tanh(cbind(1, S)%*%cbind(b1[1, ], b2[1, ]))
P = tanh(cbind(1, H)%*%cbind(c1[1, ], c2[1, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[1, ], d2[1, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[1, ], e2[1, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[1, ], f2[1, ], f3[1, ]))
mse[1, ] = diag(t(y.valid - y.result)%*%(y.valid - y.result))/n
# In backward pass process, we first compute the next time values mannually.
delta1 <- matrix(0, nrow=n, ncol=3)
delta2 <- matrix(0, nrow=n, ncol=2)
delta3 <- matrix(0, nrow=n, ncol=2)
delta4 <- matrix(0, nrow=n, ncol=2)
delta5 <- matrix(0, nrow=n, ncol=2)
delta6 <- matrix(0, nrow=n, ncol=2)
delta1 = dtanh(cbind(1, R)%*%cbind(f1[1, ], f2[1, ], f3[1, ]))*(y.valid - y.result)
delta2 = dtanh(cbind(1, Q)%*%cbind(e1[1, ], e2[1, ]))*
(delta1%*%t(cbind(f1[1, ], f2[1, ], f3[1, ])[-1, ]))
delta3 = dtanh(cbind(1, P)%*%cbind(d1[1, ], d2[1, ]))*
(delta2%*%t(cbind(e1[1, ], e2[1, ])[-1, ]))
delta4 = dtanh(cbind(1, H)%*%cbind(c1[1, ], c2[1, ]))*
(delta3%*%t(cbind(d1[1, ], d2[1, ])[-1, ]))
delta5 = dtanh(cbind(1, S)%*%cbind(b1[1, ], b2[1, ]))*
(delta4%*%t(cbind(c1[1, ], c2[1, ])[-1, ]))
delta6 = dtanh(x.test%*%cbind(a1[1, ], a2[1, ]))*
(delta5%*%t(cbind(b1[1, ], b2[1, ])[-1, ]))
f1[2, ] = rate*delta1[, 1]%*%cbind(1, R)/n + f1[1, ]
f2[2, ] = rate*delta1[, 2]%*%cbind(1, R)/n + f2[1, ]
f3[2, ] = rate*delta1[, 3]%*%cbind(1, R)/n + f3[1, ]
e1[2, ] = rate*delta2[, 1]%*%cbind(1, Q)/n + e1[1, ]
d1[2, ] = rate*delta3[, 1]%*%cbind(1, P)/n + d1[1, ]
c1[2, ] = rate*delta4[, 1]%*%cbind(1, H)/n + c1[1, ]
b1[2, ] = rate*delta5[, 1]%*%cbind(1, S)/n + b1[1, ]
a1[2, ] = rate*delta6[, 1]%*%x.test/n + a1[1, ]
e2[2, ] = rate*delta2[, 2]%*%cbind(1, Q)/n + e2[1, ]
d2[2, ] = rate*delta3[, 2]%*%cbind(1, P)/n + d2[1, ]
c2[2, ] = rate*delta4[, 2]%*%cbind(1, H)/n + c2[1, ]
b2[2, ] = rate*delta5[, 2]%*%cbind(1, S)/n + b2[1, ]
a2[2, ] = rate*delta6[, 2]%*%x.test/n + a2[1, ]
# Thus, we have new coefficients now.
# Run the loop for the following steps.
for (i in 2:(n.run-1)) {
S = tanh(x.test%*%cbind(a1[i, ], a2[i, ]))
H = tanh(cbind(1, S)%*%cbind(b1[i, ], b2[i, ]))
P = tanh(cbind(1, H)%*%cbind(c1[i, ], c2[i, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[i, ], d2[i, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[i, ], e2[i, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[i, ], f2[i, ], f3[i, ]))
mse[i, ] = diag(t(y.valid - y.result)%*%(y.valid - y.result))/n
delta1 = dtanh(cbind(1, R)%*%cbind(f1[i, ], f2[i, ], f3[i, ]))*(y.valid - y.result)
delta2 = dtanh(cbind(1, Q)%*%cbind(e1[i, ], e2[i, ]))*
(delta1%*%t(cbind(f1[i, ], f2[i, ], f3[i, ])[-1, ]))
delta3 = dtanh(cbind(1, P)%*%cbind(d1[i, ], d2[i, ]))*
(delta2%*%t(cbind(e1[i, ], e2[i, ])[-1, ]))
delta4 = dtanh(cbind(1, H)%*%cbind(c1[i, ], c2[i, ]))*
(delta3%*%t(cbind(d1[i, ], d2[i, ])[-1, ]))
delta5 = dtanh(cbind(1, S)%*%cbind(b1[i, ], b2[i, ]))*
(delta4%*%t(cbind(c1[i, ], c2[i, ])[-1, ]))
delta6 = dtanh(x.test%*%cbind(a1[i, ], a2[i, ]))*
(delta5%*%t(cbind(b1[i, ], b2[i, ])[-1, ]))
f1[(i+1), ] = rate*delta1[, 1]%*%cbind(1, R)/n + f1[i, ]
f2[(i+1), ] = rate*delta1[, 2]%*%cbind(1, R)/n + f2[i, ]
f3[(i+1), ] = rate*delta1[, 3]%*%cbind(1, R)/n + f3[i, ]
e1[(i+1), ] = rate*delta2[, 1]%*%cbind(1, Q)/n + e1[i, ]
d1[(i+1), ] = rate*delta3[, 1]%*%cbind(1, P)/n + d1[i, ]
c1[(i+1), ] = rate*delta4[, 1]%*%cbind(1, H)/n + c1[i, ]
b1[(i+1), ] = rate*delta5[, 1]%*%cbind(1, S)/n + b1[i, ]
a1[(i+1), ] = rate*delta6[, 1]%*%x.test/n + a1[i, ]
e2[(i+1), ] = rate*delta2[, 2]%*%cbind(1, Q)/n + e2[i, ]
d2[(i+1), ] = rate*delta3[, 2]%*%cbind(1, P)/n + d2[i, ]
c2[(i+1), ] = rate*delta4[, 2]%*%cbind(1, H)/n + c2[i, ]
b2[(i+1), ] = rate*delta5[, 2]%*%cbind(1, S)/n + b2[i, ]
a2[(i+1), ] = rate*delta6[, 2]%*%x.test/n + a2[i, ]
}
# Remarkably, it is very very fast XDDDD.
y.result # seems not resonable
plot(mse[, 1]) # MSE declines slowly
# try different rate, and run all of the things above
rate = 0.5
#-----------------------------------------------
# After run all of the things above....
y.result # seems resonable now
plot(mse[, 1])
plot(mse[, 2])
plot(mse[, 3])
plot(apply(mse, 1, sum)[-n.run], type='l', xlab='Step', ylab='MSE')
# find the minimum MSE
min.mse <- which.min(apply(mse, 1, sum)[-n.run])
# compute the final codings with minimal MSE
S = tanh(x.test%*%cbind(a1[min.mse, ], a2[min.mse, ]))
H = tanh(cbind(1, S)%*%cbind(b1[min.mse, ], b2[min.mse, ]))
P = tanh(cbind(1, H)%*%cbind(c1[min.mse, ], c2[min.mse, ]))
Q = tanh(cbind(1, P)%*%cbind(d1[min.mse, ], d2[min.mse, ]))
R = tanh(cbind(1, Q)%*%cbind(e1[min.mse, ], e2[min.mse, ]))
y.result = tanh(cbind(1, R)%*%cbind(f1[min.mse, ], f2[min.mse, ], f3[min.mse, ]))
# coefficients with minimal MSE
a1[min.mse, ]
b1[min.mse, ]
c1[min.mse, ]
d1[min.mse, ]
e1[min.mse, ]
a2[min.mse, ]
b2[min.mse, ]
c2[min.mse, ]
d2[min.mse, ]
e2[min.mse, ]
f1[min.mse, ]
f2[min.mse, ]
f3[min.mse, ]
# plot the original data, and compare it with the output data
y1 = ifelse(rawdata[, 3]==1, 1, 0)
y2 = ifelse(rawdata[, 4]==1, 2, 0)
y3 = ifelse(rawdata[, 5]==1, 3, 0)
y <- y1 + y2 + y3
plot(x.test[, -1], col=y, pch=19, xlab='', ylab='', main="Original data")
y1 = ifelse(floor(y.result)[, 1]==0, 1, 0)
y2 = ifelse(floor(y.result)[, 2]==0, 2, 0)
y3 = ifelse(floor(y.result)[, 3]==0, 3, 0)
y <- y1 + y2 + y3
plot(x.test[, -1], col=y, pch=19, xlab='', ylab='', main="Output data")
# We see that some points are classified into wrong classes, especially the upper-right corner.
# Try longer steps.
n.run = 5000
rate = 0.5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.