content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Shiny UI: a single-plot page that exposes pointer interactions on the
# plot (click, double-click, hover, brush) as inputs, plus a verbatim
# text panel ("info") for displaying them (presumably filled by the
# server -- not visible here).
ui <- basicPage(
plotOutput("plot1",
# Each interaction type is bound to an input ID the server can read,
# e.g. input$plot_click, input$plot_brush.
click = "plot_click",
dblclick = "plot_dblclick",
hover = "plot_hover",
brush = "plot_brush"
),
verbatimTextOutput("info")
)
| /src/ui.R | no_license | kondalaraogangavarapu/cool-shiny | R | false | false | 184 | r | ui <- basicPage(
plotOutput("plot1",
click = "plot_click",
dblclick = "plot_dblclick",
hover = "plot_hover",
brush = "plot_brush"
),
verbatimTextOutput("info")
)
|
# Driver script for the World Values Survey (WVS) analysis: load, clean,
# and describe the data via project helper functions (constructWVS,
# cleanWVS, describeWVS -- defined elsewhere in the project).
# NOTE(review): hard-coding setwd() to a personal path makes the script
# machine-specific; prefer relative paths or a project-root helper.
setwd("~/Desktop/Econ Research")
# make sure your working directory has the WVS.dta file
constructWVS("WVS.dta")
cleanWVS()
describeWVS()
# make sure your working directory has the WVS.dta file
# (constructWVS/cleanWVS/describeWVS are project helpers defined
# elsewhere; this builds, cleans, then summarises the WVS data.)
constructWVS("WVS.dta")
cleanWVS()
describeWVS()
# Machine Learning Script (assignment 1.5)
# Nicolas Soncini - 2018
#
# Runs the external C4.5 decision-tree learner over "Diagonal" and
# "Parallel" synthetic datasets of increasing size, parses its output,
# and plots averaged error rates and tree sizes before/after pruning.
library(stringr)
options(digits=5)
# Training-set sizes; one dataset directory per size is expected on disk
# (e.g. Diag125Ds/, Par125Ds/).
n <- c(125,250,500,1000,2000,4000)
# Per-size averages for the Diagonal datasets, filled in the loop below
# (bp = before pruning, ap = after pruning; see plot section names).
d_training_error_bp <- c()
d_test_error_bp <- c()
d_nodes_bp <- c()
d_training_error_ap <- c()
d_test_error_ap <- c()
d_nodes_ap <- c()
# For each training-set size, run C4.5 twenty times on the "Diagonal"
# datasets, then parse and average node counts and error rates before
# (bp) and after (ap) pruning.
for (i in n){
dir <- paste0("Diag", i, "Ds/")
fname <- paste0(dir,"diag",toString(i)) #ie: Diag125Ds/diag125
oname <- paste0(fname, ".out") #ie: Diag125Ds/diag125.out
# Archive any previous results file so the `>>` appends start fresh.
# NOTE(review): shells out to `mv`, so this is POSIX-only; file.rename()
# would be portable.
system(paste("mv", oname, paste0(oname, ".old")))
for (j in 1:20){
dname <- paste0(fname, "_", toString(j)) #ie: diag125_1
# Run C4.5 (-g -u), keep only the lines marked "<<", append to <fname>.out
cmd <- paste("c4.5 -f", dname, "-g -u | grep \"<<\" >>", oname)
system(cmd)
}
# Accumulators summed over the 20 runs (divided by 20 below).
nodesbp <- 0
errorbp <- 0
testbp <- 0
nodesap <- 0
errorap <- 0
testap <- 0
# Regular expression that extracts the values from one grep'd line.
# Capture groups: 1-NodesBP 2-ErrorBP 3-NodesAP 4-ErrorAP
# (the lookaheads stop the decimal capture from ending mid-number).
regex <- ".*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.]).*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
con <- file(oname, "r")
# The parser assumes lines come in pairs per run: first an evaluation
# line feeding the node/error accumulators, then one whose error
# captures feed the test accumulators.
while (TRUE) {
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
# str_match returns a matrix: m[1] is the full match, captures at m[2..5].
nodesbp <- nodesbp + strtoi(m[2])
errorbp <- errorbp + as.double(m[3])
nodesap <- nodesap + strtoi(m[4])
errorap <- errorap + as.double(m[5])
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
# Only the error captures are used from the second (test) line.
testbp <- testbp + as.double(m[3])
testap <- testap + as.double(m[5])
}
close(con)
# Report and store the per-size averages over the 20 runs.
say <- paste("Diagonal: For n =", i, ": nodesbp (", nodesbp/20 ,") ; errorbp (", errorbp/20 ,") ; testbp (", testbp/20, ").")
print(say)
say <- paste("Diagonal: For n =", i, ": nodesap (", nodesap/20 ,") ; errorap (", errorap/20 ,") ; testap (", testap/20, ").")
print(say)
d_training_error_bp <- c(d_training_error_bp, errorbp/20)
d_test_error_bp <- c(d_test_error_bp, testbp/20)
d_nodes_bp <- c(d_nodes_bp, nodesbp/20)
d_training_error_ap <- c(d_training_error_ap, errorap/20)
d_test_error_ap <- c(d_test_error_ap, testap/20)
d_nodes_ap <- c(d_nodes_ap, nodesap/20)
}
# Per-size averages for the Parallel datasets, filled analogously to the
# Diagonal vectors above (bp = before pruning, ap = after pruning).
p_training_error_bp <- c()
p_test_error_bp <- c()
p_nodes_bp <- c()
p_training_error_ap <- c()
p_test_error_ap <- c()
p_nodes_ap <- c()
# Same procedure as the Diagonal loop above, but for the "Parallel"
# datasets: 20 C4.5 runs per training-set size, then parse and average.
for (i in n){
dir <- paste0("Par", i, "Ds/")
fname <- paste0(dir,"par",toString(i)) #ie: Par125Ds/par125
oname <- paste0(fname, ".out") #ie: par125Ds/par125.out
# Archive any previous results file so the `>>` appends start fresh.
system(paste("mv", oname, paste0(oname, ".old")))
for (j in 1:20){
dname <- paste0(fname, "_", toString(j)) #ie: par125_1
# Run C4.5 (-g -u), keep only the lines marked "<<", append to <fname>.out
cmd <- paste("c4.5 -f", dname, "-g -u | grep \"<<\" >>", oname)
system(cmd)
}
# Accumulators summed over the 20 runs (divided by 20 below).
nodesbp <- 0
errorbp <- 0
testbp <- 0
nodesap <- 0
errorap <- 0
testap <- 0
# Regular expression that extracts the values from one grep'd line.
# Capture groups: 1-NodesBP 2-ErrorBP 3-NodesAP 4-ErrorAP
regex <- ".*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.]).*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
con <- file(oname, "r")
# Lines are parsed in pairs per run, as in the Diagonal loop.
while (TRUE) {
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
nodesbp <- nodesbp + strtoi(m[2])
errorbp <- errorbp + as.double(m[3])
nodesap <- nodesap + strtoi(m[4])
errorap <- errorap + as.double(m[5])
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
# Only the error captures are used from the second (test) line.
testbp <- testbp + as.double(m[3])
testap <- testap + as.double(m[5])
}
close(con)
# Report and store the per-size averages over the 20 runs.
say <- paste("Parallel: For n =", i, ": nodesbp (", nodesbp/20 ,") ; errorbp (", errorbp/20 ,") ; testbp (", testbp/20, ").")
print(say)
say <- paste("Parallel: For n =", i, ": nodesap (", nodesap/20 ,") ; errorap (", errorap/20 ,") ; testap (", testap/20, ").")
print(say)
p_training_error_bp <- c(p_training_error_bp, errorbp/20)
p_test_error_bp <- c(p_test_error_bp, testbp/20)
p_nodes_bp <- c(p_nodes_bp, nodesbp/20)
p_training_error_ap <- c(p_training_error_ap, errorap/20)
p_test_error_ap <- c(p_test_error_ap, testap/20)
p_nodes_ap <- c(p_nodes_ap, nodesap/20)
}
## PLOTTING ##
# NOTE(review): ggplot2 is attached but never used below (base graphics
# only); kept to preserve the file's attach side effects.
library(ggplot2)

# --- Before pruning: training and test error vs. training-set size ---
png("errorbp.png")
# BUG FIX: the y-range must span ALL plotted series; the original only
# considered the diagonal series here, so the parallel lines could be
# clipped outside ylim. (The node-count plots already used both.)
limy <- range(d_training_error_bp, d_test_error_bp,
              p_training_error_bp, p_test_error_bp)
plot(n, d_training_error_bp, col="red", ylim=limy, type="o",
     xlab="Training points", ylab="Percentual error")
lines(n, d_test_error_bp, col="red", type="o", lty=3)
lines(n, p_training_error_bp, col="blue", type="o")
lines(n, p_test_error_bp, col="blue", type="o", lty=3)
legend(x="topright",
       legend=c("Diagonal train", "Diagonal test", "Parallel train", "Parallel test"),
       col=c("red", "red", "blue", "blue"),
       lty=c(1,3,1,3))
dev.off()

# --- Before pruning: decision-tree node count ---
png("nodecbp.png")
limy <- range(p_nodes_bp, d_nodes_bp)
plot(n, d_nodes_bp, col="red", ylim=limy, type="o",
     xlab="Training points", ylab="Decision tree nodes")
lines(n, p_nodes_bp, col="blue", type="o")
legend(x="topleft",
       legend=c("Diagonal", "Parallel"),
       col=c("red", "blue"),
       lty=c(1,1))
dev.off()

# --- After pruning: training and test error ---
png("errorap.png")
limy <- range(d_training_error_ap, d_test_error_ap,
              p_training_error_ap, p_test_error_ap)
plot(n, d_training_error_ap, col="red", ylim=limy, type="o",
     xlab="Training points", ylab="Percentual error")
lines(n, d_test_error_ap, col="red", type="o", lty=3)
lines(n, p_training_error_ap, col="blue", type="o")
lines(n, p_test_error_ap, col="blue", type="o", lty=3)
legend(x="topright",
       legend=c("Diagonal train", "Diagonal test", "Parallel train", "Parallel test"),
       col=c("red", "red", "blue", "blue"),
       lty=c(1,3,1,3))
dev.off()

# --- After pruning: decision-tree node count ---
png("nodecap.png")
limy <- range(p_nodes_ap, d_nodes_ap)
plot(n, d_nodes_ap, col="red", ylim=limy, type="o",
     xlab="Training points", ylab="Decision tree nodes")
lines(n, p_nodes_ap, col="blue", type="o")
legend(x="topleft",
       legend=c("Diagonal", "Parallel"),
       col=c("red", "blue"),
       lty=c(1,1))
dev.off()
| /DecisionTrees/NDependant/ndependant.R | no_license | doraemon96/CS-MachineLearning | R | false | false | 6,186 | r | # Machine Learning Script (assignment 1.5)
# Nicolas Soncini - 2018
library(stringr)
options(digits=5)
n <- c(125,250,500,1000,2000,4000)
d_training_error_bp <- c()
d_test_error_bp <- c()
d_nodes_bp <- c()
d_training_error_ap <- c()
d_test_error_ap <- c()
d_nodes_ap <- c()
for (i in n){
dir <- paste0("Diag", i, "Ds/")
fname <- paste0(dir,"diag",toString(i)) #ie: Diag125Ds/diag125
oname <- paste0(fname, ".out") #ie: Diag125Ds/diag125.out
system(paste("mv", oname, paste0(oname, ".old")))
for (j in 1:20){
dname <- paste0(fname, "_", toString(j)) #ie: diag125_1
# Run C4.5, redirect interesting output to <fname>.out
cmd <- paste("c4.5 -f", dname, "-g -u | grep \"<<\" >>", oname)
system(cmd)
}
nodesbp <- 0
errorbp <- 0
testbp <- 0
nodesap <- 0
errorap <- 0
testap <- 0
# Run the Regular Expression to find the values
# Regex: 1-NodesBP 2-ErrorBP 3-NodesAP 4-ErrorAP
regex <- ".*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.]).*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
con <- file(oname, "r")
while (TRUE) {
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
nodesbp <- nodesbp + strtoi(m[2])
errorbp <- errorbp + as.double(m[3])
nodesap <- nodesap + strtoi(m[4])
errorap <- errorap + as.double(m[5])
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
testbp <- testbp + as.double(m[3])
testap <- testap + as.double(m[5])
}
close(con)
say <- paste("Diagonal: For n =", i, ": nodesbp (", nodesbp/20 ,") ; errorbp (", errorbp/20 ,") ; testbp (", testbp/20, ").")
print(say)
say <- paste("Diagonal: For n =", i, ": nodesap (", nodesap/20 ,") ; errorap (", errorap/20 ,") ; testap (", testap/20, ").")
print(say)
d_training_error_bp <- c(d_training_error_bp, errorbp/20)
d_test_error_bp <- c(d_test_error_bp, testbp/20)
d_nodes_bp <- c(d_nodes_bp, nodesbp/20)
d_training_error_ap <- c(d_training_error_ap, errorap/20)
d_test_error_ap <- c(d_test_error_ap, testap/20)
d_nodes_ap <- c(d_nodes_ap, nodesap/20)
}
p_training_error_bp <- c()
p_test_error_bp <- c()
p_nodes_bp <- c()
p_training_error_ap <- c()
p_test_error_ap <- c()
p_nodes_ap <- c()
for (i in n){
dir <- paste0("Par", i, "Ds/")
fname <- paste0(dir,"par",toString(i)) #ie: Par125Ds/par125
oname <- paste0(fname, ".out") #ie: par125Ds/par125.out
system(paste("mv", oname, paste0(oname, ".old")))
for (j in 1:20){
dname <- paste0(fname, "_", toString(j)) #ie: par125_1
# Run C4.5, redirect interesting output to <fname>.out
cmd <- paste("c4.5 -f", dname, "-g -u | grep \"<<\" >>", oname)
system(cmd)
}
nodesbp <- 0
errorbp <- 0
testbp <- 0
nodesap <- 0
errorap <- 0
testap <- 0
# Run the Regular Expression to find the values
# Regex: 1-NodesBP 2-ErrorBP 3-NodesAP 4-ErrorAP
regex <- ".*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.]).*?(\\d+).*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
con <- file(oname, "r")
while (TRUE) {
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
nodesbp <- nodesbp + strtoi(m[2])
errorbp <- errorbp + as.double(m[3])
nodesap <- nodesap + strtoi(m[4])
errorap <- errorap + as.double(m[5])
line <- readLines(con, n=1)
if(length(line) == 0) {break}
m <- str_match(line, regex)
testbp <- testbp + as.double(m[3])
testap <- testap + as.double(m[5])
}
close(con)
say <- paste("Parallel: For n =", i, ": nodesbp (", nodesbp/20 ,") ; errorbp (", errorbp/20 ,") ; testbp (", testbp/20, ").")
print(say)
say <- paste("Parallel: For n =", i, ": nodesap (", nodesap/20 ,") ; errorap (", errorap/20 ,") ; testap (", testap/20, ").")
print(say)
p_training_error_bp <- c(p_training_error_bp, errorbp/20)
p_test_error_bp <- c(p_test_error_bp, testbp/20)
p_nodes_bp <- c(p_nodes_bp, nodesbp/20)
p_training_error_ap <- c(p_training_error_ap, errorap/20)
p_test_error_ap <- c(p_test_error_ap, testap/20)
p_nodes_ap <- c(p_nodes_ap, nodesap/20)
}
## PLOTTING ##
library(ggplot2)
# Before prunning training error and test error
png("errorbp.png")
limy <- c(min(d_training_error_bp,d_test_error_bp),max(d_training_error_bp,d_test_error_bp))
plot(n, d_training_error_bp, col="red", ylim=limy, type="o", xlab="Training points", ylab="Percentual error")
lines(n, d_test_error_bp, col="red", type="o", lty=3)
lines(n, p_training_error_bp, col="blue", type="o")
lines(n, p_test_error_bp, col="blue", type="o", lty=3)
legend( x="topright",
, legend=c("Diagonal train", "Diagonal test", "Parallel train", "Parallel test")
, col=c("red", "red", "blue", "blue")
, lty=c(1,3,1,3)
)
dev.off()
# Before prunning node count
png("nodecbp.png")
limy <- c(min(p_nodes_bp, d_nodes_bp),max(p_nodes_bp,d_nodes_bp))
plot(n, d_nodes_bp, col="red", ylim=limy, type="o", xlab="Training points", ylab="Decision tree nodes")
lines(n, p_nodes_bp, col="blue", type="o")
legend( x="topleft",
, legend=c("Diagonal", "Parallel")
, col=c("red", "blue")
, lty=c(1,1)
)
dev.off()
# After prunning training error and test error
png("errorap.png")
limy <- c(min(d_training_error_ap,d_test_error_ap),max(d_training_error_ap,d_test_error_ap))
plot(n, d_training_error_ap, col="red", ylim=limy, type="o", xlab="Training points", ylab="Percentual error")
lines(n, d_test_error_ap, col="red", type="o", lty=3)
lines(n, p_training_error_ap, col="blue", type="o")
lines(n, p_test_error_ap, col="blue", type="o", lty=3)
legend( x="topright",
, legend=c("Diagonal train", "Diagonal test", "Parallel train", "Parallel test")
, col=c("red", "red", "blue", "blue")
, lty=c(1,3,1,3)
)
dev.off()
# After prunning node count
png("nodecap.png")
limy <- c(min(p_nodes_ap, d_nodes_ap),max(p_nodes_ap,d_nodes_ap))
plot(n, d_nodes_ap, col="red", ylim=limy,type="o", xlab="Training points", ylab="Decision tree nodes")
lines(n, p_nodes_ap, col="blue", type="o")
legend( x="topleft",
, legend=c("Diagonal", "Parallel")
, col=c("red", "blue")
, lty=c(1,1)
)
dev.off()
|
# Extracted Rd example for Hangartner.Diagnostic from the LaplacesDemon
# package (Hangartner's convergence diagnostic; keywords: Diagnostic, MCMC).
library(LaplacesDemon)
### Name: Hangartner.Diagnostic
### Title: Hangartner's Convergence Diagnostic
### Aliases: Hangartner.Diagnostic
### Keywords: Diagnostic MCMC
### ** Examples
library(LaplacesDemon)
# Draw N samples from a K-category categorical distribution with equal
# probabilities, then run the diagnostic on the resulting chain.
N <- 1000
K <- 3
x <- rcat(N, rep(1/K,K))
# J=2 splits the chain into batches -- TODO(review): confirm the exact
# meaning of J against ?Hangartner.Diagnostic.
hd <- Hangartner.Diagnostic(x, J=2)
hd
| /data/genthat_extracted_code/LaplacesDemon/examples/Hangartner.Diagnostic.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 294 | r | library(LaplacesDemon)
### Name: Hangartner.Diagnostic
### Title: Hangartner's Convergence Diagnostic
### Aliases: Hangartner.Diagnostic
### Keywords: Diagnostic MCMC
### ** Examples
# Extracted Rd example: sample a categorical chain and run Hangartner's
# convergence diagnostic on it (rcat and Hangartner.Diagnostic come from
# the LaplacesDemon package).
library(LaplacesDemon)
N <- 1000
K <- 3
x <- rcat(N, rep(1/K,K))
hd <- Hangartner.Diagnostic(x, J=2)
hd
|
library(biomod2)
# Project fitted biomod2 models for one algorithm and one month, then
# render and save a map of the predicted suitability.
#
# Args:
#   model    - name of the modelling algorithm (e.g. "GAM"), as it
#              appears in the biomod2 run names
#   modelOut - modelling output object passed to BIOMOD_Projection
#   j        - month index (1-12), used for the projection name and title
#   species  - species identifier used to build biomod2 file paths
#   version  - model-version suffix appended to the species name
#   fp_out   - root directory under which the plot PNG is written
#
# Relies on globals `covars` (projection covariates) and `worldmap`
# (coastline polygons), and on attached packages providing raster(),
# crs(), inferno(), and the ggplot2 functions -- TODO(review): attach
# these explicitly at the top of the file.
project_biomod <- function(model, modelOut, j, species, version, fp_out) {
  months <- c("January", "February", "March",
              "April", "May", "June",
              "July", "August", "September",
              "October", "November", "December")

  # ------- Project models -------
  # Names of all 10 runs of this algorithm, as biomod2 stores them.
  select_models <- paste0(species, version, "_AllData_RUN", seq_len(10), "_", model)
  # Called for its side effect of writing the projection files to disk.
  BIOMOD_Projection(modeling.output = modelOut,
                    new.env = covars,
                    proj.name = paste0(model, "_", j),
                    selected.models = select_models,
                    binary.meth = 'ROC',
                    compress = 'xz',
                    build.clamping.mask = TRUE,
                    output.format = '.grd')

  # Load the projection as a raster. biomod2 stores predictions scaled
  # by 1000, so divide to recover values in [0, 1] (plain division
  # replaces the original `%>% \`/\`(1000)`, dropping the magrittr need).
  proj_raster <- raster(file.path(paste0(species, version),
                                  paste0("proj_", model, "_", j),
                                  paste0("proj_", model, "_", j, "_", species, version, '.grd'))) / 1000
  crs(proj_raster) <- '+init=epsg:4121 +proj=longlat +ellps=GRS80 +datum=GGRS87 +no_defs +towgs84=-199.87,74.79,246.62'

  # Convert to a data frame with xy coordinates, dropping NA cells.
  proj_df <- as.data.frame(proj_raster, xy = TRUE, na.rm = TRUE)
  names(proj_df) <- c('pred', 'x', 'y')

  # -------- Plot projection --------
  p <- ggplot() +
    # Prediction surface
    geom_tile(data = proj_df, aes(x, y, fill = pred)) +
    # Color gradient for predictions in [0, 1]
    scale_fill_gradientn(colors = inferno(500), limits = c(0, 1), na.value = "white") +
    # BUG FIX: labs() has no `main` argument; the plot title must be
    # passed as `title` or it is silently ignored.
    labs(x = "",
         y = "",
         title = months[j]) +
    # Coastline overlay
    geom_polygon(data = worldmap, aes(long, lat, group = group), fill = NA, colour = "gray43") +
    coord_quickmap(xlim = c(round(min(proj_df$x)), round(max(proj_df$x))),
                   ylim = c(round(min(proj_df$y)), round(max(proj_df$y))),
                   expand = TRUE) +
    # Remove grid lines
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

  # BUG FIX: pass the plot explicitly. The original chained `+ ggsave(...)`
  # onto the ggplot, which relies on last_plot(); inside a function the
  # plot is never printed, so last_plot() would be stale (or unset).
  ggsave(filename = file.path(fp_out, species, version, "Biomod", "Plots",
                              paste0(model, "_proj_", j, ".png")),
         plot = p, width = 7, height = 7)
}
library(biomod2)
# Project fitted biomod2 models for one algorithm and one month, then
# render and save a map of the predicted suitability.
#
# Args:
#   model    - name of the modelling algorithm (e.g. "GAM"), as it
#              appears in the biomod2 run names
#   modelOut - modelling output object passed to BIOMOD_Projection
#   j        - month index (1-12), used for the projection name and title
#   species  - species identifier used to build biomod2 file paths
#   version  - model-version suffix appended to the species name
#   fp_out   - root directory under which the plot PNG is written
#
# Relies on globals `covars` (projection covariates) and `worldmap`
# (coastline polygons), and on attached packages providing raster(),
# crs(), inferno(), and the ggplot2 functions -- TODO(review): attach
# these explicitly at the top of the file.
project_biomod <- function(model, modelOut, j, species, version, fp_out) {
  months <- c("January", "February", "March",
              "April", "May", "June",
              "July", "August", "September",
              "October", "November", "December")

  # ------- Project models -------
  # Names of all 10 runs of this algorithm, as biomod2 stores them.
  select_models <- paste0(species, version, "_AllData_RUN", seq_len(10), "_", model)
  # Called for its side effect of writing the projection files to disk.
  BIOMOD_Projection(modeling.output = modelOut,
                    new.env = covars,
                    proj.name = paste0(model, "_", j),
                    selected.models = select_models,
                    binary.meth = 'ROC',
                    compress = 'xz',
                    build.clamping.mask = TRUE,
                    output.format = '.grd')

  # Load the projection as a raster. biomod2 stores predictions scaled
  # by 1000, so divide to recover values in [0, 1] (plain division
  # replaces the original `%>% \`/\`(1000)`, dropping the magrittr need).
  proj_raster <- raster(file.path(paste0(species, version),
                                  paste0("proj_", model, "_", j),
                                  paste0("proj_", model, "_", j, "_", species, version, '.grd'))) / 1000
  crs(proj_raster) <- '+init=epsg:4121 +proj=longlat +ellps=GRS80 +datum=GGRS87 +no_defs +towgs84=-199.87,74.79,246.62'

  # Convert to a data frame with xy coordinates, dropping NA cells.
  proj_df <- as.data.frame(proj_raster, xy = TRUE, na.rm = TRUE)
  names(proj_df) <- c('pred', 'x', 'y')

  # -------- Plot projection --------
  p <- ggplot() +
    # Prediction surface
    geom_tile(data = proj_df, aes(x, y, fill = pred)) +
    # Color gradient for predictions in [0, 1]
    scale_fill_gradientn(colors = inferno(500), limits = c(0, 1), na.value = "white") +
    # BUG FIX: labs() has no `main` argument; the plot title must be
    # passed as `title` or it is silently ignored.
    labs(x = "",
         y = "",
         title = months[j]) +
    # Coastline overlay
    geom_polygon(data = worldmap, aes(long, lat, group = group), fill = NA, colour = "gray43") +
    coord_quickmap(xlim = c(round(min(proj_df$x)), round(max(proj_df$x))),
                   ylim = c(round(min(proj_df$y)), round(max(proj_df$y))),
                   expand = TRUE) +
    # Remove grid lines
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

  # BUG FIX: pass the plot explicitly. The original chained `+ ggsave(...)`
  # onto the ggplot, which relies on last_plot(); inside a function the
  # plot is never printed, so last_plot() would be stale (or unset).
  ggsave(filename = file.path(fp_out, species, version, "Biomod", "Plots",
                              paste0(model, "_proj_", j, ".png")),
         plot = p, width = 7, height = 7)
}
## calculate inequality indicators
##################################
# load packages and data -----------------------------------------------------------
library(dplyr)
library(survey)
library(convey)
library(eurostat)
load("./data/silc_eu28.RData")
rm(silc.d.store, silc.h.store, silc.p.store, silc.r.store)
# Subsetting? --------------------------------------------------------------
#
# # # To get useful results we may want to subset to only positive income (at least one positive income!)
silc.p1.ppp.store <- silc.p1.ppp.store %>% filter_at(vars(equivalent_pre_tax_factor_income:equivalent_post_tax_disposable_income), any_vars(. > 0))
silc.p2.ppp.store <- silc.p2.ppp.store %>% filter_at(vars(pre_tax_factor_income:post_tax_disposable_income), any_vars(. > 0))
# get population data for Theil -------------------------------------------
pop <- get_eurostat("naida_10_pe", time_format = "num")
pop <- pop %>% filter(na_item == "POP_NC")
# loop to calculate indicators for each year ------------------------------
indicators <- array(NA, c(14, 50, 2))
theil <- array(NA, c(40, 500, 2), dimnames = list(c(as.character(1:40)),c(as.character(1:500)),c(as.character(1:2))))
for(year in 2004:2017){
indicators[year-2003, 1,] <- year
# Creating Survey Objects -------------------------------------------------
silc.p1.y <- silc.p1.ppp.store %>% filter(rb010 %in% year)
silc.p2.y <- silc.p2.ppp.store %>% filter(pb010 %in% year)
indicators[year-2003, 35,1] <- toString(levels(as.factor(silc.p1.y$rb020)))
indicators[year-2003, 36,1] <- nlevels(as.factor(silc.p1.y$rb020))
indicators[year-2003, 35,2] <- toString(levels(as.factor(silc.p2.y$pb020)))
indicators[year-2003, 36,2] <- nlevels(as.factor(silc.p2.y$pb020))
silc.p1.svy <- svydesign(ids = ~ id_h,
strata = ~db040,
weights = ~rb050,
data = silc.p1.y) %>% convey_prep()
silc.p2.svy <- svydesign(ids = ~id_h,
strata = ~db040,
weights = ~pb040,
data = silc.p2.y) %>% convey_prep()
# Indicators --------------------------------------------------------------
# Mean Income
#
indicators[year-2003, 2, 1] <- svymean(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 3, 1] <- svymean(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 4, 1] <- svymean(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 5, 1] <- svymean(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 6, 1] <- svymean(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 2, 2] <- svymean(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 3, 2] <- svymean(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 4, 2] <- svymean(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svymean)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svymean)
# Median Income
#
indicators[year-2003, 7, 1] <- svyquantile(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), quantiles = c(0.5))
indicators[year-2003, 8, 1] <- svyquantile(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), quantiles = c(0.5))
indicators[year-2003, 9, 1] <- svyquantile(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), quantiles = c(0.5))
indicators[year-2003, 10, 1] <- svyquantile(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), quantiles = c(0.5))
indicators[year-2003, 11, 1] <- svyquantile(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), quantiles = c(0.5))
indicators[year-2003, 7, 2] <- svyquantile(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), quantiles = c(0.5))
indicators[year-2003, 8, 2] <- svyquantile(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), quantiles = c(0.5))
indicators[year-2003, 9, 2] <- svyquantile(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), quantiles = c(0.5))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = c(0.5), keep.var = FALSE)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy,
# svyquantile, ~hy010, quantiles = c(0.5), keep.var = FALSE)
#
# # Decile Points
# #
# svyquantile(~total.inc, silc.pd.svy, quantiles = seq(0, 1, 0.1))
# svyquantile(~hy010, silc.hd.svy, quantiles = seq(0, 1, 0.1))
# # For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = seq(0, 1, 0.1), keep.var = FALSE)
# svyby(~hy010, ~as.factor(hb020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = seq(0, 1, 0.1), keep.var = FALSE)
# Quantile Share Ratio
#
indicators[year-2003, 12, 1] <- svyqsr(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), 0.2, 0.8)
indicators[year-2003, 13, 1] <- svyqsr(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), 0.2, 0.8)
indicators[year-2003, 14, 1] <- svyqsr(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), 0.2, 0.8)
indicators[year-2003, 15, 1] <- svyqsr(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), 0.2, 0.8)
indicators[year-2003, 16, 1] <- svyqsr(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), 0.2, 0.8)
indicators[year-2003, 12, 2] <- svyqsr(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), 0.2, 0.8)
indicators[year-2003, 13, 2] <- svyqsr(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), 0.2, 0.8)
indicators[year-2003, 14, 2] <- svyqsr(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), 0.2, 0.8)
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svyqsr, 0.2, 0.8)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svyqsr, 0.2, 0.8)
# Top 10% Income Share
#
indicators[year-2003, 17, 1] <- svytotal(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income >=
as.numeric(svyquantile(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 18, 1] <- svytotal(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income >=
as.numeric(svyquantile(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 19, 1] <- svytotal(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income >=
as.numeric(svyquantile(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 20, 1] <- svytotal(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed >=
as.numeric(svyquantile(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 21, 1] <- svytotal(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed >=
as.numeric(svyquantile(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), quantile = 0.9)))) /
svytotal(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 17, 2] <- svytotal(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income >=
as.numeric(svyquantile(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), quantile = 0.9)))) /
svytotal(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 18, 2] <- svytotal(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income >=
as.numeric(svyquantile(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), quantile = 0.9)))) /
svytotal(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 19, 2] <- svytotal(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income >=
as.numeric(svyquantile(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), quantile = 0.9)))) /
svytotal(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# Gini Coefficient
#
indicators[year-2003, 22, 1] <- svygini(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 23, 1] <- svygini(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 24, 1] <- svygini(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 25, 1] <- svygini(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 26, 1] <- svygini(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 22, 2] <- svygini(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 23, 2] <- svygini(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 24, 2] <- svygini(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svygini)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svygini)
# Theil Index
#
indicators[year-2003, 27, 1] <- svygei(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), epsilon = 1)
indicators[year-2003, 28, 1] <- svygei(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), epsilon = 1)
indicators[year-2003, 29, 1] <- svygei(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), epsilon = 1)
indicators[year-2003, 30, 1] <- svygei(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), epsilon = 1)
indicators[year-2003, 31, 1] <- svygei(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), epsilon = 1)
indicators[year-2003, 27, 2] <- svygei(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), epsilon = 1)
indicators[year-2003, 28, 2] <- svygei(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), epsilon = 1)
indicators[year-2003, 29, 2] <- svygei(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), epsilon = 1)
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svygei, epsilon = 1)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy,
# svygei, epsilon = 1)
# Theil Index Decomposition
#
# svygeidec(~equivalent_pre_tax_factor_income, ~rb020,silc.p1.svy, epsilon = 1)
# table(silc.p1.y$rb020)
# calculate sum of total population for all countries
pop.y <- pop %>% filter(time %in% year)
pop_sum <- pop.y %>% filter(geo %in% c(levels(as.factor(silc.p1.y$rb020)))) %>% summarise(pop_sum = sum(values))
# first theil loop p1: per-country building blocks of the Theil decomposition
#   T = sum_c(w_c * T_c) + sum_c(w_c * log(mu_c / mu))
# for the equivalised (p1) income concepts. For each country the loop stores,
# at column offset coloff = (year-2004)*13 of the character array `theil`:
#   +1: country code, +2/+6/+10: country Theil T_c (fi/ni/di),
#   +3/+7/+11: country mean mu_c, +4/+8/+12: economic weight
#   w_c = pop_c * mu_c / (pop_total * mu_total).
coloff <- (year-2004)*13
# the column labels depend only on the year, so set them once instead of
# re-assigning them on every country iteration
colnames(theil)[1+coloff] <- paste0("countries",year)
colnames(theil)[2+coloff] <- paste0("fi.theil",year)
colnames(theil)[3+coloff] <- paste0("fi.mean",year)
colnames(theil)[4+coloff] <- paste0("fi.econ.weight",year)
colnames(theil)[5+coloff] <- paste0("fi.ineq.share",year)
colnames(theil)[6+coloff] <- paste0("ni.theil",year)
colnames(theil)[7+coloff] <- paste0("ni.mean",year)
colnames(theil)[8+coloff] <- paste0("ni.econ.weight",year)
colnames(theil)[9+coloff] <- paste0("ni.ineq.share",year)
colnames(theil)[10+coloff] <- paste0("di.theil",year)
colnames(theil)[11+coloff] <- paste0("di.mean",year)
colnames(theil)[12+coloff] <- paste0("di.econ.weight",year)
colnames(theil)[13+coloff] <- paste0("di.ineq.share",year)
# country codes are loop-invariant: compute them once
lev.p1 <- levels(as.factor(silc.p1.y$rb020))
for(country in seq_along(lev.p1)){
  # total population of the country from the eurostat table
  pop_c <- pop.y$values[pop.y$geo==lev.p1[country]]
  # country code
  theil[country,1+coloff,1] <- lev.p1[country]
  # --- factor income: country Theil, mean, economic weight ---
  theil[country,2+coloff,1] <- svygei(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_factor_income > 0), epsilon = 1)
  theil[country,3+coloff,1] <- svymean(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_factor_income > 0))
  theil[country,4+coloff,1] <- (pop_c*as.numeric(theil[country,3+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 2, 1]))
  # --- national income: country Theil, mean, economic weight ---
  theil[country,6+coloff,1] <- svygei(~equivalent_pre_tax_national_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_national_income > 0), epsilon = 1)
  theil[country,7+coloff,1] <- svymean(~equivalent_pre_tax_national_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_national_income > 0))
  # FIX: normalise by the overall *national* income mean (indicators column 3,
  # cf. mean_eptni in the rename below), not the factor income mean (column 2).
  # Otherwise the weights do not sum to one and the decomposition identity
  # used for theil_ni_manual fails.
  theil[country,8+coloff,1] <- (pop_c*as.numeric(theil[country,7+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 3, 1]))
  # --- disposable income: country Theil, mean, economic weight ---
  theil[country,10+coloff,1] <- svygei(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_post_tax_disposable_income > 0), epsilon = 1)
  theil[country,11+coloff,1] <- svymean(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_post_tax_disposable_income > 0))
  # FIX: normalise by the overall *disposable* income mean (column 4).
  theil[country,12+coloff,1] <- (pop_c*as.numeric(theil[country,11+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 4, 1]))
}
# the row after the last country is reserved for the between component
# (its share is filled in once the overall Theil has been computed)
theil[country+1,1+coloff,1] <- "between"
# first theil loop p2: same per-country Theil components as the p1 loop, but
# for the individual-level (p2) income concepts, stored in slice 2 of `theil`.
coloff <- (year-2004)*13
# country codes are loop-invariant: compute them once
lev.p2 <- levels(as.factor(silc.p2.y$pb020))
for(country in seq_along(lev.p2)){
  # total population of the country from the eurostat table
  pop_c <- pop.y$values[pop.y$geo==lev.p2[country]]
  # country code
  theil[country,1+coloff,2] <- lev.p2[country]
  # --- factor income: country Theil, mean, economic weight ---
  theil[country,2+coloff,2] <- svygei(~pre_tax_factor_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_factor_income > 0), epsilon = 1)
  theil[country,3+coloff,2] <- svymean(~pre_tax_factor_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_factor_income > 0))
  theil[country,4+coloff,2] <- (pop_c*as.numeric(theil[country,3+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 2, 2]))
  # --- national income: country Theil, mean, economic weight ---
  theil[country,6+coloff,2] <- svygei(~pre_tax_national_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_national_income > 0), epsilon = 1)
  theil[country,7+coloff,2] <- svymean(~pre_tax_national_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_national_income > 0))
  # FIX: normalise by the overall *national* income mean (indicators column 3),
  # not the factor income mean (column 2), so the weights sum to one.
  theil[country,8+coloff,2] <- (pop_c*as.numeric(theil[country,7+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 3, 2]))
  # --- disposable income: country Theil, mean, economic weight ---
  theil[country,10+coloff,2] <- svygei(~post_tax_disposable_income, subset(silc.p2.svy, pb020==lev.p2[country] & post_tax_disposable_income > 0), epsilon = 1)
  theil[country,11+coloff,2] <- svymean(~post_tax_disposable_income, subset(silc.p2.svy, pb020==lev.p2[country] & post_tax_disposable_income > 0))
  # FIX: normalise by the overall *disposable* income mean (column 4).
  theil[country,12+coloff,2] <- (pop_c*as.numeric(theil[country,11+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 4, 2]))
}
# row after the last country holds the between component share later on
theil[country+1,1+coloff,2] <- "between"
# calculate Theil manually
# Overall Theil per income concept (columns 32-34 = fi/ni/di), assembled from
# the per-country pieces as  T = sum_c(w_c*T_c) + sum_c(w_c*log(mu_c/mu)).
# na.exclude() drops the unfilled rows of the over-allocated `theil` array so
# the dot products run only over the country rows; both vectors of each
# product are filled for the same rows, so the pairing stays aligned.
indicators[year-2003, 32, 1] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,4+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 2, 1])))
indicators[year-2003, 33, 1] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,8+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 3, 1])))
indicators[year-2003, 34, 1] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,12+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 4, 1])))
indicators[year-2003, 32, 2] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,4+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 2, 2])))
indicators[year-2003, 33, 2] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,8+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 3, 2])))
indicators[year-2003, 34, 2] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,12+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 4, 2])))
# within component
# within = sum_c(w_c * T_c), stored in columns 37-39 (fi/ni/di)
indicators[year-2003, 37, 1] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,1]))
indicators[year-2003, 38, 1] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,1]))
indicators[year-2003, 39, 1] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,1]))
indicators[year-2003, 37, 2] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,2]))
indicators[year-2003, 38, 2] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,2]))
indicators[year-2003, 39, 2] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,2]))
# between component
# between = sum_c(w_c * log(mu_c/mu)), stored in columns 40-42 (fi/ni/di)
indicators[year-2003, 40, 1] <- t(na.exclude(as.numeric(theil[,4+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 2, 1])))
indicators[year-2003, 41, 1] <- t(na.exclude(as.numeric(theil[,8+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 3, 1])))
indicators[year-2003, 42, 1] <- t(na.exclude(as.numeric(theil[,12+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 4, 1])))
indicators[year-2003, 40, 2] <- t(na.exclude(as.numeric(theil[,4+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 2, 2])))
indicators[year-2003, 41, 2] <- t(na.exclude(as.numeric(theil[,8+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 3, 2])))
indicators[year-2003, 42, 2] <- t(na.exclude(as.numeric(theil[,12+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 4, 2])))
#between shares
# share of total inequality explained by the between-country component,
# written into the "between" row; relies on `country` keeping its last value
# from the loops above (so country+1 is the row after the last country)
theil[country+1,5+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 40, 1]) / as.numeric(indicators[year-2003, 32, 1])
theil[country+1,9+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 41, 1]) / as.numeric(indicators[year-2003, 33, 1])
theil[country+1,13+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 42, 1])/ as.numeric(indicators[year-2003, 34, 1])
#between shares
theil[country+1,5+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 40, 2]) / as.numeric(indicators[year-2003, 32, 2])
theil[country+1,9+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 41, 2]) / as.numeric(indicators[year-2003, 33, 2])
theil[country+1,13+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 42, 2])/ as.numeric(indicators[year-2003, 34, 2])
# 2nd theil loop p1 - calculate ineq shares
# each country's contribution w_c*T_c expressed as a share of the overall
# Theil computed above (columns 32-34)
for(country in 1:nlevels(as.factor(silc.p1.y$rb020))){
# country ineq share
theil[country,5+(year-2004)*13,1] <- (as.numeric(theil[country,4+(year-2004)*13,1])*as.numeric(theil[country,2+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 32, 1])
theil[country,9+(year-2004)*13,1] <- (as.numeric(theil[country,8+(year-2004)*13,1])*as.numeric(theil[country,6+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 33, 1])
theil[country,13+(year-2004)*13,1] <- (as.numeric(theil[country,12+(year-2004)*13,1])*as.numeric(theil[country,10+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 34, 1])
}
# 2nd theil loop p2 - calculate ineq shares
for(country in 1:nlevels(as.factor(silc.p2.y$pb020))){
# country ineq share
theil[country,5+(year-2004)*13,2] <- (as.numeric(theil[country,4+(year-2004)*13,2])*as.numeric(theil[country,2+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 32, 2])
theil[country,9+(year-2004)*13,2] <- (as.numeric(theil[country,8+(year-2004)*13,2])*as.numeric(theil[country,6+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 33, 2])
theil[country,13+(year-2004)*13,2] <- (as.numeric(theil[country,12+(year-2004)*13,2])*as.numeric(theil[country,10+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 34, 2])
}
#
} # end of the for(year ...) loop
# create dataframes with indicators for p1 and p2 and name them
# NOTE: the arrays are character (mixed country codes and numbers), so all
# columns of these data frames are character/factor; downstream code coerces
# with as.numeric(as.character(.)).
indicators.p1 <- as.data.frame(indicators[,,1])
indicators.p2 <- as.data.frame(indicators[,,2])
theil.p1 <- as.data.frame(theil[,,1])
theil.p2 <- as.data.frame(theil[,,2])
# positional renames of V1..V42 (naming scheme: ept* = equivalised p1 income
# concepts, pt* = individual-level p2 concepts; fi/ni/di = factor / national /
# disposable income; *ii = imputed variants)
indicators.p1 <- indicators.p1 %>% rename(year = V1, mean_eptfi = V2, mean_eptni = V3, mean_eptdi = V4, mean_eptfii = V5, mean_eptdii = V6, median_eptfi = V7, median_eptni = V8, median_eptdi = V9, median_eptfii = V10, median_eptdii = V11, qsr8020_eptfi = V12, qsr8020_eptni = V13, qsr8020_eptdi = V14, qsr8020_eptfii = V15, qsr8020_eptdii = V16, top10_eptfi = V17, top10_eptni = V18, top10_eptdi = V19, top10_eptfii = V20, top10_eptdii = V21, gini_eptfi = V22, gini_eptni = V23, gini_eptdi = V24, gini_eptfii = V25, gini_eptdii = V26, theil_eptfi = V27, theil_eptni = V28, theil_eptdi = V29, theil_eptfii = V30, theil_eptdii = V31, theil_fi_manual = V32, theil_ni_manual = V33, theil_di_manual = V34, countries = V35, ncountries = V36, theil_fi_within = V37, theil_ni_within = V38, theil_di_within = V39, theil_fi_between = V40, theil_ni_between = V41, theil_di_between = V42)
indicators.p2 <- indicators.p2 %>% rename(year = V1, mean_ptfi = V2, mean_ptni = V3, mean_ptdi = V4, mean_ptfii = V5, mean_ptdii = V6, median_ptfi = V7, median_ptni = V8, median_ptdi = V9, median_ptfii = V10, median_ptdii = V11, qsr8020_ptfi = V12, qsr8020_ptni = V13, qsr8020_ptdi = V14, qsr8020_ptfii = V15, qsr8020_ptdii = V16, top10_ptfi = V17, top10_ptni = V18, top10_ptdi = V19, top10_ptfii = V20, top10_ptdii = V21, gini_ptfi = V22, gini_ptni = V23, gini_ptdi = V24, gini_ptfii = V25, gini_ptdii = V26, theil_ptfi = V27, theil_ptni = V28, theil_ptdi = V29, theil_ptfii = V30, theil_ptdii = V31, theil_fi_manual = V32, theil_ni_manual = V33, theil_di_manual = V34, countries = V35, ncountries = V36, theil_fi_within = V37, theil_ni_within = V38, theil_di_within = V39, theil_fi_between = V40, theil_ni_between = V41, theil_di_between = V42)
# calculate theil between ineq share
# between-country component as a share of the total (manually assembled) Theil
indicators.p1 <- indicators.p1 %>% mutate(theil_between_share_fi = as.numeric(as.character(theil_fi_between)) / as.numeric(as.character(theil_fi_manual)), theil_between_share_ni = as.numeric(as.character(theil_ni_between)) / as.numeric(as.character(theil_ni_manual)), theil_between_share_di = as.numeric(as.character(theil_di_between)) / as.numeric(as.character(theil_di_manual)))
indicators.p2 <- indicators.p2 %>% mutate(theil_between_share_fi = as.numeric(as.character(theil_fi_between)) / as.numeric(as.character(theil_fi_manual)), theil_between_share_ni = as.numeric(as.character(theil_ni_between)) / as.numeric(as.character(theil_ni_manual)), theil_between_share_di = as.numeric(as.character(theil_di_between)) / as.numeric(as.character(theil_di_manual)))
# drop the never-filled spare columns of the 50-column array
indicators.p1 <- indicators.p1 %>% select(-(V43:V50))
indicators.p2 <- indicators.p2 %>% select(-(V43:V50))
# check proportions -------------------------------------------------------
#
#
# table(silc.h.store$hb020[which(silc.h.store$hb010==2017)])
# cw <- silc.p1.y %>% filter(equivalent_post_tax_disposable_income>0) %>% group_by(rb020) %>% summarise(sum_cweight = sum(rb050))
# cw <- cw %>% mutate(propcw = sum_cweight/sum(sum_cweight))
#
# cwj <- left_join(cw, pop.nobs, by = c("rb020"="geo"))
# save data ---------------------------------------------------------------
save(indicators.p1, indicators.p2, theil.p1, theil.p2, file = "./data/eu28_indicators.RData")
| /R/eu28_indicators.R | permissive | philippwarum/ineq_project | R | false | false | 28,864 | r | ## calculate inequality indicators
##################################
# load packages and data -----------------------------------------------------------
library(dplyr)
library(survey)
library(convey)
library(eurostat)
load("./data/silc_eu28.RData")
rm(silc.d.store, silc.h.store, silc.p.store, silc.r.store)
# Subsetting? --------------------------------------------------------------
#
# # # To get useful results we may want to subset to only positive income (at least one positive income!)
silc.p1.ppp.store <- silc.p1.ppp.store %>% filter_at(vars(equivalent_pre_tax_factor_income:equivalent_post_tax_disposable_income), any_vars(. > 0))
silc.p2.ppp.store <- silc.p2.ppp.store %>% filter_at(vars(pre_tax_factor_income:post_tax_disposable_income), any_vars(. > 0))
# get population data for Theil -------------------------------------------
pop <- get_eurostat("naida_10_pe", time_format = "num")
pop <- pop %>% filter(na_item == "POP_NC")
# loop to calculate indicators for each year ------------------------------
indicators <- array(NA, c(14, 50, 2))
theil <- array(NA, c(40, 500, 2), dimnames = list(c(as.character(1:40)),c(as.character(1:500)),c(as.character(1:2))))
# one pass per survey year; indicators[year-2003, , ] is the row for `year`
for(year in 2004:2017){
indicators[year-2003, 1,] <- year
# Creating Survey Objects -------------------------------------------------
silc.p1.y <- silc.p1.ppp.store %>% filter(rb010 %in% year)
silc.p2.y <- silc.p2.ppp.store %>% filter(pb010 %in% year)
# record which countries are present this year (col 35) and how many (col 36)
indicators[year-2003, 35,1] <- toString(levels(as.factor(silc.p1.y$rb020)))
indicators[year-2003, 36,1] <- nlevels(as.factor(silc.p1.y$rb020))
indicators[year-2003, 35,2] <- toString(levels(as.factor(silc.p2.y$pb020)))
indicators[year-2003, 36,2] <- nlevels(as.factor(silc.p2.y$pb020))
# complex survey designs (household cluster id_h, strata db040); personal
# weights rb050 for p1, pb040 for p2. convey_prep() enables the convey
# inequality estimators (svygini, svygei, ...).
silc.p1.svy <- svydesign(ids = ~ id_h,
strata = ~db040,
weights = ~rb050,
data = silc.p1.y) %>% convey_prep()
silc.p2.svy <- svydesign(ids = ~id_h,
strata = ~db040,
weights = ~pb040,
data = silc.p2.y) %>% convey_prep()
# Mean Income
#
indicators[year-2003, 2, 1] <- svymean(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 3, 1] <- svymean(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 4, 1] <- svymean(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 5, 1] <- svymean(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 6, 1] <- svymean(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 2, 2] <- svymean(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 3, 2] <- svymean(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 4, 2] <- svymean(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svymean)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svymean)
# Median Income
#
indicators[year-2003, 7, 1] <- svyquantile(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), quantiles = c(0.5))
indicators[year-2003, 8, 1] <- svyquantile(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), quantiles = c(0.5))
indicators[year-2003, 9, 1] <- svyquantile(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), quantiles = c(0.5))
indicators[year-2003, 10, 1] <- svyquantile(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), quantiles = c(0.5))
indicators[year-2003, 11, 1] <- svyquantile(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), quantiles = c(0.5))
indicators[year-2003, 7, 2] <- svyquantile(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), quantiles = c(0.5))
indicators[year-2003, 8, 2] <- svyquantile(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), quantiles = c(0.5))
indicators[year-2003, 9, 2] <- svyquantile(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), quantiles = c(0.5))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = c(0.5), keep.var = FALSE)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy,
# svyquantile, ~hy010, quantiles = c(0.5), keep.var = FALSE)
#
# # Decile Points
# #
# svyquantile(~total.inc, silc.pd.svy, quantiles = seq(0, 1, 0.1))
# svyquantile(~hy010, silc.hd.svy, quantiles = seq(0, 1, 0.1))
# # For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = seq(0, 1, 0.1), keep.var = FALSE)
# svyby(~hy010, ~as.factor(hb020), silc.pd.svy,
# svyquantile, ~total.inc, quantiles = seq(0, 1, 0.1), keep.var = FALSE)
# Quantile Share Ratio
# columns 12-16: S80/S20 quantile share ratio (top vs bottom quintile)
indicators[year-2003, 12, 1] <- svyqsr(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), 0.2, 0.8)
indicators[year-2003, 13, 1] <- svyqsr(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), 0.2, 0.8)
indicators[year-2003, 14, 1] <- svyqsr(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), 0.2, 0.8)
indicators[year-2003, 15, 1] <- svyqsr(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), 0.2, 0.8)
indicators[year-2003, 16, 1] <- svyqsr(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), 0.2, 0.8)
indicators[year-2003, 12, 2] <- svyqsr(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), 0.2, 0.8)
indicators[year-2003, 13, 2] <- svyqsr(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), 0.2, 0.8)
indicators[year-2003, 14, 2] <- svyqsr(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), 0.2, 0.8)
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svyqsr, 0.2, 0.8)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svyqsr, 0.2, 0.8)
# Top 10% Income Share
# columns 17-21: total income held by units at/above the weighted 90th
# percentile, divided by total income of the positive-income population.
# NOTE(review): `quantile = 0.9` relies on partial matching of svyquantile's
# `quantiles` argument — spell it out if svyquantile's interface changes.
indicators[year-2003, 17, 1] <- svytotal(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income >=
as.numeric(svyquantile(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 18, 1] <- svytotal(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income >=
as.numeric(svyquantile(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 19, 1] <- svytotal(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income >=
as.numeric(svyquantile(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), quantile = 0.9)))) /
svytotal(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 20, 1] <- svytotal(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed >=
as.numeric(svyquantile(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), quantile = 0.9)))) /
svytotal(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 21, 1] <- svytotal(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed >=
as.numeric(svyquantile(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), quantile = 0.9)))) /
svytotal(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 17, 2] <- svytotal(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income >=
as.numeric(svyquantile(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), quantile = 0.9)))) /
svytotal(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 18, 2] <- svytotal(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income >=
as.numeric(svyquantile(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), quantile = 0.9)))) /
svytotal(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 19, 2] <- svytotal(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income >=
as.numeric(svyquantile(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), quantile = 0.9)))) /
svytotal(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# Gini Coefficient
# columns 22-26: survey Gini per income concept (positive incomes only)
indicators[year-2003, 22, 1] <- svygini(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0))
indicators[year-2003, 23, 1] <- svygini(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0))
indicators[year-2003, 24, 1] <- svygini(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0))
indicators[year-2003, 25, 1] <- svygini(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0))
indicators[year-2003, 26, 1] <- svygini(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0))
indicators[year-2003, 22, 2] <- svygini(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0))
indicators[year-2003, 23, 2] <- svygini(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0))
indicators[year-2003, 24, 2] <- svygini(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0))
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy, svygini)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy, svygini)
# Theil Index
# columns 27-31: generalized entropy index with epsilon = 1 (= Theil T)
indicators[year-2003, 27, 1] <- svygei(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, equivalent_pre_tax_factor_income > 0), epsilon = 1)
indicators[year-2003, 28, 1] <- svygei(~equivalent_pre_tax_national_income, subset(silc.p1.svy, equivalent_pre_tax_national_income > 0), epsilon = 1)
indicators[year-2003, 29, 1] <- svygei(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, equivalent_post_tax_disposable_income > 0), epsilon = 1)
indicators[year-2003, 30, 1] <- svygei(~equivalent_pre_tax_factor_income_imputed, subset(silc.p1.svy, equivalent_pre_tax_factor_income_imputed > 0), epsilon = 1)
indicators[year-2003, 31, 1] <- svygei(~equivalent_post_tax_disposable_income_imputed, subset(silc.p1.svy, equivalent_post_tax_disposable_income_imputed > 0), epsilon = 1)
indicators[year-2003, 27, 2] <- svygei(~pre_tax_factor_income, subset(silc.p2.svy, pre_tax_factor_income > 0), epsilon = 1)
indicators[year-2003, 28, 2] <- svygei(~pre_tax_national_income, subset(silc.p2.svy, pre_tax_national_income > 0), epsilon = 1)
indicators[year-2003, 29, 2] <- svygei(~post_tax_disposable_income, subset(silc.p2.svy, post_tax_disposable_income > 0), epsilon = 1)
# For comparing countries
# svyby(~total.inc, ~as.factor(db020), silc.pd.svy,
# svygei, epsilon = 1)
# svyby(~hy010, ~as.factor(db020), silc.hd.svy,
# svygei, epsilon = 1)
# Theil Index Decomposition
#
# svygeidec(~equivalent_pre_tax_factor_income, ~rb020,silc.p1.svy, epsilon = 1)
# table(silc.p1.y$rb020)
# calculate sum of total population for all countries
# eurostat population for the current year; pop_sum covers exactly the
# countries present in the sample (a 1x1 data frame, hence as.numeric later)
pop.y <- pop %>% filter(time %in% year)
pop_sum <- pop.y %>% filter(geo %in% c(levels(as.factor(silc.p1.y$rb020)))) %>% summarise(pop_sum = sum(values))
# first theil loop p1: per-country building blocks of the Theil decomposition
#   T = sum_c(w_c * T_c) + sum_c(w_c * log(mu_c / mu))
# for the equivalised (p1) income concepts. For each country the loop stores,
# at column offset coloff = (year-2004)*13 of the character array `theil`:
#   +1: country code, +2/+6/+10: country Theil T_c (fi/ni/di),
#   +3/+7/+11: country mean mu_c, +4/+8/+12: economic weight
#   w_c = pop_c * mu_c / (pop_total * mu_total).
coloff <- (year-2004)*13
# the column labels depend only on the year, so set them once instead of
# re-assigning them on every country iteration
colnames(theil)[1+coloff] <- paste0("countries",year)
colnames(theil)[2+coloff] <- paste0("fi.theil",year)
colnames(theil)[3+coloff] <- paste0("fi.mean",year)
colnames(theil)[4+coloff] <- paste0("fi.econ.weight",year)
colnames(theil)[5+coloff] <- paste0("fi.ineq.share",year)
colnames(theil)[6+coloff] <- paste0("ni.theil",year)
colnames(theil)[7+coloff] <- paste0("ni.mean",year)
colnames(theil)[8+coloff] <- paste0("ni.econ.weight",year)
colnames(theil)[9+coloff] <- paste0("ni.ineq.share",year)
colnames(theil)[10+coloff] <- paste0("di.theil",year)
colnames(theil)[11+coloff] <- paste0("di.mean",year)
colnames(theil)[12+coloff] <- paste0("di.econ.weight",year)
colnames(theil)[13+coloff] <- paste0("di.ineq.share",year)
# country codes are loop-invariant: compute them once
lev.p1 <- levels(as.factor(silc.p1.y$rb020))
for(country in seq_along(lev.p1)){
  # total population of the country from the eurostat table
  pop_c <- pop.y$values[pop.y$geo==lev.p1[country]]
  # country code
  theil[country,1+coloff,1] <- lev.p1[country]
  # --- factor income: country Theil, mean, economic weight ---
  theil[country,2+coloff,1] <- svygei(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_factor_income > 0), epsilon = 1)
  theil[country,3+coloff,1] <- svymean(~equivalent_pre_tax_factor_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_factor_income > 0))
  theil[country,4+coloff,1] <- (pop_c*as.numeric(theil[country,3+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 2, 1]))
  # --- national income: country Theil, mean, economic weight ---
  theil[country,6+coloff,1] <- svygei(~equivalent_pre_tax_national_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_national_income > 0), epsilon = 1)
  theil[country,7+coloff,1] <- svymean(~equivalent_pre_tax_national_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_pre_tax_national_income > 0))
  # FIX: normalise by the overall *national* income mean (indicators column 3),
  # not the factor income mean (column 2); otherwise the weights do not sum
  # to one and the decomposition identity used below fails.
  theil[country,8+coloff,1] <- (pop_c*as.numeric(theil[country,7+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 3, 1]))
  # --- disposable income: country Theil, mean, economic weight ---
  theil[country,10+coloff,1] <- svygei(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_post_tax_disposable_income > 0), epsilon = 1)
  theil[country,11+coloff,1] <- svymean(~equivalent_post_tax_disposable_income, subset(silc.p1.svy, rb020==lev.p1[country] & equivalent_post_tax_disposable_income > 0))
  # FIX: normalise by the overall *disposable* income mean (column 4).
  theil[country,12+coloff,1] <- (pop_c*as.numeric(theil[country,11+coloff,1]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 4, 1]))
}
# the row after the last country is reserved for the between component
theil[country+1,1+coloff,1] <- "between"
# first theil loop p2: same per-country Theil components as the p1 loop, but
# for the individual-level (p2) income concepts, stored in slice 2 of `theil`.
coloff <- (year-2004)*13
# country codes are loop-invariant: compute them once
lev.p2 <- levels(as.factor(silc.p2.y$pb020))
for(country in seq_along(lev.p2)){
  # total population of the country from the eurostat table
  pop_c <- pop.y$values[pop.y$geo==lev.p2[country]]
  # country code
  theil[country,1+coloff,2] <- lev.p2[country]
  # --- factor income: country Theil, mean, economic weight ---
  theil[country,2+coloff,2] <- svygei(~pre_tax_factor_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_factor_income > 0), epsilon = 1)
  theil[country,3+coloff,2] <- svymean(~pre_tax_factor_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_factor_income > 0))
  theil[country,4+coloff,2] <- (pop_c*as.numeric(theil[country,3+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 2, 2]))
  # --- national income: country Theil, mean, economic weight ---
  theil[country,6+coloff,2] <- svygei(~pre_tax_national_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_national_income > 0), epsilon = 1)
  theil[country,7+coloff,2] <- svymean(~pre_tax_national_income, subset(silc.p2.svy, pb020==lev.p2[country] & pre_tax_national_income > 0))
  # FIX: normalise by the overall *national* income mean (indicators column 3),
  # not the factor income mean (column 2), so the weights sum to one.
  theil[country,8+coloff,2] <- (pop_c*as.numeric(theil[country,7+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 3, 2]))
  # --- disposable income: country Theil, mean, economic weight ---
  theil[country,10+coloff,2] <- svygei(~post_tax_disposable_income, subset(silc.p2.svy, pb020==lev.p2[country] & post_tax_disposable_income > 0), epsilon = 1)
  theil[country,11+coloff,2] <- svymean(~post_tax_disposable_income, subset(silc.p2.svy, pb020==lev.p2[country] & post_tax_disposable_income > 0))
  # FIX: normalise by the overall *disposable* income mean (column 4).
  theil[country,12+coloff,2] <- (pop_c*as.numeric(theil[country,11+coloff,2]))/(as.numeric(pop_sum)*as.numeric(indicators[year-2003, 4, 2]))
}
# row after the last country holds the between component share later on
theil[country+1,1+coloff,2] <- "between"
# calculate Theil manually
# Overall Theil per income concept (columns 32-34 = fi/ni/di), assembled from
# the per-country pieces as  T = sum_c(w_c*T_c) + sum_c(w_c*log(mu_c/mu)).
# na.exclude() drops the unfilled rows of the over-allocated `theil` array so
# the dot products run only over the country rows.
indicators[year-2003, 32, 1] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,4+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 2, 1])))
indicators[year-2003, 33, 1] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,8+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 3, 1])))
indicators[year-2003, 34, 1] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,1])) + t(na.exclude(as.numeric(theil[,12+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 4, 1])))
indicators[year-2003, 32, 2] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,4+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 2, 2])))
indicators[year-2003, 33, 2] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,8+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 3, 2])))
indicators[year-2003, 34, 2] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,2])) + t(na.exclude(as.numeric(theil[,12+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 4, 2])))
# within component
# within = sum_c(w_c * T_c), stored in columns 37-39 (fi/ni/di)
indicators[year-2003, 37, 1] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,1]))
indicators[year-2003, 38, 1] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,1]))
indicators[year-2003, 39, 1] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,1])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,1]))
indicators[year-2003, 37, 2] <- t(na.exclude(as.numeric(theil[,2+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,4+(year-2004)*13,2]))
indicators[year-2003, 38, 2] <- t(na.exclude(as.numeric(theil[,6+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,8+(year-2004)*13,2]))
indicators[year-2003, 39, 2] <- t(na.exclude(as.numeric(theil[,10+(year-2004)*13,2])))%*%na.exclude(as.numeric(theil[,12+(year-2004)*13,2]))
# between component
# between = sum_c(w_c * log(mu_c/mu)), stored in columns 40-42 (fi/ni/di)
indicators[year-2003, 40, 1] <- t(na.exclude(as.numeric(theil[,4+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 2, 1])))
indicators[year-2003, 41, 1] <- t(na.exclude(as.numeric(theil[,8+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 3, 1])))
indicators[year-2003, 42, 1] <- t(na.exclude(as.numeric(theil[,12+(year-2004)*13,1])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 4, 1])))
indicators[year-2003, 40, 2] <- t(na.exclude(as.numeric(theil[,4+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,3+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 2, 2])))
indicators[year-2003, 41, 2] <- t(na.exclude(as.numeric(theil[,8+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,7+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 3, 2])))
indicators[year-2003, 42, 2] <- t(na.exclude(as.numeric(theil[,12+(year-2004)*13,2])))%*%(log(na.exclude(as.numeric(theil[,11+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 4, 2])))
#between shares
# share of total inequality explained by the between-country component,
# written into the "between" row; relies on `country` keeping its last value
# from the loops above (so country+1 is the row after the last country)
theil[country+1,5+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 40, 1]) / as.numeric(indicators[year-2003, 32, 1])
theil[country+1,9+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 41, 1]) / as.numeric(indicators[year-2003, 33, 1])
theil[country+1,13+(year-2004)*13,1] <- as.numeric(indicators[year-2003, 42, 1])/ as.numeric(indicators[year-2003, 34, 1])
#between shares
theil[country+1,5+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 40, 2]) / as.numeric(indicators[year-2003, 32, 2])
theil[country+1,9+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 41, 2]) / as.numeric(indicators[year-2003, 33, 2])
theil[country+1,13+(year-2004)*13,2] <- as.numeric(indicators[year-2003, 42, 2])/ as.numeric(indicators[year-2003, 34, 2])
# 2nd theil loop p1 - calculate ineq shares
for(country in 1:nlevels(as.factor(silc.p1.y$rb020))){
# country ineq share
theil[country,5+(year-2004)*13,1] <- (as.numeric(theil[country,4+(year-2004)*13,1])*as.numeric(theil[country,2+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 32, 1])
theil[country,9+(year-2004)*13,1] <- (as.numeric(theil[country,8+(year-2004)*13,1])*as.numeric(theil[country,6+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 33, 1])
theil[country,13+(year-2004)*13,1] <- (as.numeric(theil[country,12+(year-2004)*13,1])*as.numeric(theil[country,10+(year-2004)*13,1]))/as.numeric(indicators[year-2003, 34, 1])
}
# 2nd theil loop p2 - calculate ineq shares
for(country in 1:nlevels(as.factor(silc.p2.y$pb020))){
# country ineq share
theil[country,5+(year-2004)*13,2] <- (as.numeric(theil[country,4+(year-2004)*13,2])*as.numeric(theil[country,2+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 32, 2])
theil[country,9+(year-2004)*13,2] <- (as.numeric(theil[country,8+(year-2004)*13,2])*as.numeric(theil[country,6+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 33, 2])
theil[country,13+(year-2004)*13,2] <- (as.numeric(theil[country,12+(year-2004)*13,2])*as.numeric(theil[country,10+(year-2004)*13,2]))/as.numeric(indicators[year-2003, 34, 2])
}
#
}
# create dataframes with indicators for p1 and p2 and name them
# NOTE(review): `indicators` and `theil` are 3-d arrays built earlier in this
# script (not visible here); slice [,,1] appears to hold the p1 (register)
# file and [,,2] the p2 (personal) file -- confirm against the construction
# code above.
indicators.p1 <- as.data.frame(indicators[,,1])
indicators.p2 <- as.data.frame(indicators[,,2])
theil.p1 <- as.data.frame(theil[,,1])
theil.p2 <- as.data.frame(theil[,,2])
# Replace the automatic V1..V42 column names with descriptive indicator names
# (means, medians, quantile share ratios, top-10% shares, Gini, Theil and the
# manual Theil within/between decomposition components).
indicators.p1 <- indicators.p1 %>% rename(year = V1, mean_eptfi = V2, mean_eptni = V3, mean_eptdi = V4, mean_eptfii = V5, mean_eptdii = V6, median_eptfi = V7, median_eptni = V8, median_eptdi = V9, median_eptfii = V10, median_eptdii = V11, qsr8020_eptfi = V12, qsr8020_eptni = V13, qsr8020_eptdi = V14, qsr8020_eptfii = V15, qsr8020_eptdii = V16, top10_eptfi = V17, top10_eptni = V18, top10_eptdi = V19, top10_eptfii = V20, top10_eptdii = V21, gini_eptfi = V22, gini_eptni = V23, gini_eptdi = V24, gini_eptfii = V25, gini_eptdii = V26, theil_eptfi = V27, theil_eptni = V28, theil_eptdi = V29, theil_eptfii = V30, theil_eptdii = V31, theil_fi_manual = V32, theil_ni_manual = V33, theil_di_manual = V34, countries = V35, ncountries = V36, theil_fi_within = V37, theil_ni_within = V38, theil_di_within = V39, theil_fi_between = V40, theil_ni_between = V41, theil_di_between = V42)
indicators.p2 <- indicators.p2 %>% rename(year = V1, mean_ptfi = V2, mean_ptni = V3, mean_ptdi = V4, mean_ptfii = V5, mean_ptdii = V6, median_ptfi = V7, median_ptni = V8, median_ptdi = V9, median_ptfii = V10, median_ptdii = V11, qsr8020_ptfi = V12, qsr8020_ptni = V13, qsr8020_ptdi = V14, qsr8020_ptfii = V15, qsr8020_ptdii = V16, top10_ptfi = V17, top10_ptni = V18, top10_ptdi = V19, top10_ptfii = V20, top10_ptdii = V21, gini_ptfi = V22, gini_ptni = V23, gini_ptdi = V24, gini_ptfii = V25, gini_ptdii = V26, theil_ptfi = V27, theil_ptni = V28, theil_ptdi = V29, theil_ptfii = V30, theil_ptdii = V31, theil_fi_manual = V32, theil_ni_manual = V33, theil_di_manual = V34, countries = V35, ncountries = V36, theil_fi_within = V37, theil_ni_within = V38, theil_di_within = V39, theil_fi_between = V40, theil_ni_between = V41, theil_di_between = V42)
# calculate theil between ineq share
# Share of total inequality explained by the between-country component:
# between component divided by the manually computed total Theil index.
# Columns come out of the array as factors, hence as.numeric(as.character(.)).
indicators.p1 <- indicators.p1 %>% mutate(theil_between_share_fi = as.numeric(as.character(theil_fi_between)) / as.numeric(as.character(theil_fi_manual)), theil_between_share_ni = as.numeric(as.character(theil_ni_between)) / as.numeric(as.character(theil_ni_manual)), theil_between_share_di = as.numeric(as.character(theil_di_between)) / as.numeric(as.character(theil_di_manual)))
indicators.p2 <- indicators.p2 %>% mutate(theil_between_share_fi = as.numeric(as.character(theil_fi_between)) / as.numeric(as.character(theil_fi_manual)), theil_between_share_ni = as.numeric(as.character(theil_ni_between)) / as.numeric(as.character(theil_ni_manual)), theil_between_share_di = as.numeric(as.character(theil_di_between)) / as.numeric(as.character(theil_di_manual)))
# Drop the unused padding columns V43-V50 left over from the array dimensions.
indicators.p1 <- indicators.p1 %>% select(-(V43:V50))
indicators.p2 <- indicators.p2 %>% select(-(V43:V50))
# check proportions -------------------------------------------------------
#
#
# table(silc.h.store$hb020[which(silc.h.store$hb010==2017)])
# cw <- silc.p1.y %>% filter(equivalent_post_tax_disposable_income>0) %>% group_by(rb020) %>% summarise(sum_cweight = sum(rb050))
# cw <- cw %>% mutate(propcw = sum_cweight/sum(sum_cweight))
#
# cwj <- left_join(cw, pop.nobs, by = c("rb020"="geo"))
# save data ---------------------------------------------------------------
# Persist all four indicator tables for downstream analysis scripts.
save(indicators.p1, indicators.p2, theil.p1, theil.p2, file = "./data/eu28_indicators.RData")
|
# Stack rasters with different extents (Fazer Stack com diferente extents)
# Eduardo Q Marques
# 11/07/2019
#
# Crops every Landsat-5 band scene to a common reference extent, writes the
# cropped rasters out, then builds one RasterStack per band across all scenes.
library(raster)
#Landsat - 5 ==============================================================================================
# Before building the stack lists, every band must be cropped to the same extent.
# Open the pre-cropped reference scene and take its extent.
ref <- raster('C:/Users/Eduardo Q Marques/Documents/My Jobs/Doutorado/Deposito/Banco de Dados Tanguro/Landsat/Extent_raster/Extent_raster.tif')
e <- extent(ref)
# Create a folder for the cropped bands (no warning if it already exists,
# so the script can be re-run).
outpath <- "C:/Users/Eduardo Q Marques/Documents/landsatcrop5/"
dir.create(outpath, showWarnings = FALSE)
# List the band files. The dot is escaped so only true ".tif" suffixes match
# (an unescaped "." would match any character before "tif").
setwd("C:/Users/Eduardo Q Marques/Documents/My Jobs/Doutorado/Deposito/Banco de Dados Tanguro/Landsat/Landsat5")
files <- list.files(pattern="\\.tif$")
# Prepend the output directory; the inputs already carry the .tif extension,
# so no extension fix-up is needed.
outfiles <- paste0(outpath, files)
# Crop each band to the reference extent and write it to the output folder.
# seq_along() is safe when no files match (1:length(files) would loop on 1:0).
for(i in seq_along(files)) {
  r <- raster(files[i])
  rc <- crop(r, e)
  writeRaster(rc, outfiles[i])
}
#Carregar bandas Landsat cortadas============================================================================
# List the cropped bands (band 6, the thermal band, is excluded).
list1 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band1\\.tif$", full.names=TRUE,recursive=TRUE)
list2 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band2\\.tif$", full.names=TRUE,recursive=TRUE)
list3 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band3\\.tif$", full.names=TRUE,recursive=TRUE)
list4 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band4\\.tif$", full.names=TRUE,recursive=TRUE)
list5 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band5\\.tif$", full.names=TRUE,recursive=TRUE)
list7 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band7\\.tif$", full.names=TRUE,recursive=TRUE)
# Build one multi-layer stack per band across all cropped scenes.
b1 <- stack(list1)
b2 <- stack(list2)
b3 <- stack(list3)
b4 <- stack(list4)
b5 <- stack(list5)
b7 <- stack(list7)
#Eduardo Q Marques
#11/07/2019
# Crops Landsat-5 band scenes to a shared reference extent, then stacks them.
library(raster)
#Landsat - 5 ==============================================================================================
# Before building the stack lists, crop the bands so they share the same extent.
# Open the pre-cropped reference scene and take its extent.
ref <- raster('C:/Users/Eduardo Q Marques/Documents/My Jobs/Doutorado/Deposito/Banco de Dados Tanguro/Landsat/Extent_raster/Extent_raster.tif')
e <- extent(ref)
# Create a folder for the cropped bands (warns if it already exists).
outpath <- "C:/Users/Eduardo Q Marques/Documents/landsatcrop5/"
dir.create(outpath)
# List the band files.
# NOTE(review): the pattern ".tif$" has an unescaped dot, so any character
# before "tif" matches -- consider "\\.tif$" for an exact extension match.
setwd("C:/Users/Eduardo Q Marques/Documents/My Jobs/Doutorado/Deposito/Banco de Dados Tanguro/Landsat/Landsat5")
files <- list.files(pattern=".tif$")
# Prepend the output directory and (re)set the file extension to .tif.
outfiles <- paste0(outpath, files)
extension(outfiles) <- 'tif'
# Crop loop: crop each band to the reference extent and write it out.
for(i in 1:length(files)) {
  r <-raster(files[i])
  rc <- crop(r, e)
  rc <- writeRaster(rc, outfiles[i])
}
#Carregar bandas Landsat cortadas============================================================================
# List the cropped bands (band 6 excluded).
list1 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band1.tif$", full.names=TRUE,recursive=TRUE)
list2 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band2.tif$", full.names=TRUE,recursive=TRUE)
list3 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band3.tif$", full.names=TRUE,recursive=TRUE)
list4 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band4.tif$", full.names=TRUE,recursive=TRUE)
list5 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band5.tif$", full.names=TRUE,recursive=TRUE)
list7 <- list.files(path="C:/Users/Eduardo Q Marques/Documents/landsatcrop5", pattern = "band7.tif$", full.names=TRUE,recursive=TRUE)
# Build one multi-layer stack per band across all cropped scenes.
b1 <- stack(list1)
b2 <- stack(list2)
b3 <- stack(list3)
b4 <- stack(list4)
b5 <- stack(list5)
b7 <- stack(list7)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixed_proteomes.R
\name{plot_boxplots}
\alias{plot_boxplots}
\title{Plot boxplots of intensity ratios.}
\usage{
plot_boxplots(meds, sampleComposition)
}
\arguments{
\item{meds}{Median intensities to plot.}
\item{sampleComposition}{composition of spiked in samples (with column ratios)}
}
\value{
a ggplot showing the ratios of intensities in two different conditions
}
\description{
This works on the output of 'get_ratios_of_medians'.
}
| /man/plot_boxplots.Rd | no_license | MatteoLacki/LFQBench2 | R | false | true | 522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixed_proteomes.R
\name{plot_boxplots}
\alias{plot_boxplots}
\title{Get species tags from a species column.}
\usage{
plot_boxplots(meds, sampleComposition)
}
\arguments{
\item{meds}{Median intensities to plot.}
\item{sampleComposition}{composition of spiked in samples (with column ratios)}
}
\value{
a ggplot showing the ratios of intensities in two different conditions
}
\description{
This works on the output of 'get_ratios_of_medians'.
}
|
#' @title Plot ROC curves
#'
#' @description This function plots ROC curves from the results of the \code{\link{assess_models}} function
#'
#' @param ... Output(s) of the \code{\link{language_model}}, \code{\link{comparison_model}}, or \code{\link{test_language_model}} functions
#' @param individual_plot If TRUE, graphs individual ROC curves for each model. Defaults to TRUE.
#' @param combined_plot If TRUE, and modelAssessment contains multiple models, graphs a plot with all ROC curves overlapping. Defaults to TRUE.
#' @param facet_plot If TRUE, and modelAssessment contains multiple models, graphs a faceted plot with all ROC curves included. Defaults to TRUE.
#' @param facet_summary If TRUE, and modelAssessment contains multiple models, the facet_plot will include a plot with all ROC curves overlapping. Defaults to TRUE.
#' @param colors A vector of colors to use for each model's ROC curve.
#' @param model_names A vector of strings to use as titles/names for each model.
#' @param plot_auc_polygon If TRUE, the area below with ROC curve with the lowest AUC will be shaded in. Defaults to TRUE.
#' @param plot_ci If TRUE, a confidence band will be plotted around each ROC curve. Defaults to TRUE.
#' @param line_size A numeric representing the width of the ROC curve line. Defaults to 1.
#' @param print_auc If TRUE, the value of the AUC will be printed on the plot. Defaults to TRUE.
#' @param print_ci If TRUE, the range of the confidence interval will be printed on the plot. Defaults to TRUE.
#' @param print_auc_ci_font_size The font size for printed values for the AUC and confidence interval. Defaults to 4.
#' @param print_auc_ci_x A vector of x (horizontal) positions determining where on the plot the AUC and confidence interval values will be printed.
#' @param print_auc_ci_y A vector of y (vertical) positions determining where on the plot the AUC and confidence interval values will be printed.
#' @param plot_legend If TRUE, a legend will be printed on combined and faceted plots (individual plots never display a legend). Defaults to TRUE.
#' @param plot_title The title of the plot
#' @param facet_n_row The number of rows used to plot the facet_plot. Defaults to NULL.
#' @param facet_n_col The number of columns used to plot the facet_plot. Defaults to 2.
#' @param legend_spacing If TRUE, there will be spacing between the legend items. Defaults to FALSE.
#'
#' @return Nothing (this function plots a series of graphs)
#'
#' @seealso \code{\link{language_model}}, \code{\link{comparison_model}}, \code{\link{test_language_model}}
#'
#' @import ggplot2 scales
#' @importFrom rlang .data
#'
#' @export
#'
#' @examples
#'
#' \dontrun{
#' strong_movie_review_data$cleanText = clean_text(strong_movie_review_data$text)
#' mild_movie_review_data$cleanText = clean_text(mild_movie_review_data$text)
#'
#' # Using language to predict "Positive" vs. "Negative" reviews
#' # Only for strong reviews (ratings of 1 or 10)
#' movie_model_strong = language_model(strong_movie_review_data,
#' outcome = "valence",
#' outcomeType = "binary",
#' text = "cleanText",
#' progressBar = FALSE)
#'
#' # Using language to predict "Positive" vs. "Negative" reviews
#' # Only for mild reviews (ratings of 4 or 7)
#' movie_model_mild = language_model(mild_movie_review_data,
#' outcome = "valence",
#' outcomeType = "binary",
#' text = "cleanText",
#' progressBar = FALSE)
#'
#'
#' # Plot ROC curves
#' plot_roc(movie_model_strong, movie_model_mild)
#' }
plot_roc = function(..., individual_plot=TRUE, combined_plot=TRUE, facet_plot=TRUE, facet_summary=TRUE, colors, model_names, plot_auc_polygon=TRUE, plot_ci=TRUE, line_size=1, print_auc=TRUE, print_ci=TRUE, print_auc_ci_font_size=4, print_auc_ci_x, print_auc_ci_y, plot_legend=TRUE, plot_title, facet_n_row=NULL, facet_n_col=2, legend_spacing=FALSE) {
  # Bind the column names used inside aes()/subset() to NULL so R CMD check
  # does not flag them as undefined global variables.
  facet=model=polygon.x=polygon.y=specificities=sensitivities=percent2p5=percent97p5=label_text=x=y=NULL
  dots = list(...)
  # Unexpanded call: dots_names$... holds the expressions the caller passed,
  # used below as the display names of the models.
  dots_names = match.call(expand.dots = FALSE)
  # Validate every input: it must be a model object with a binary outcome.
  for (i in 1:length(dots)) {
    input = dots[[i]]
    if (!(class(input) %in% c("langModel", "compModel", "testAssessment"))) {
      stop(paste0("Your argument '", as.character(dots_names$...[[i]]), "'must be a model generated by either the `language_model` or 'comparison_model` functions."))
    }
    if (input@type != "binary") {
      stop(paste0("ROCs can only be plotted for models with a binary outcome variable (`",as.character(dots_names$...[[i]]),"` does not have a binary outcome)."))
    }
    # A test-set assessment should normally be plotted alongside the model it
    # was trained from; interactively confirm if that model is missing.
    if (class(input) == "testAssessment") {
      if (!is.vector(as.character(dots_names$...))) {
        namelist = c(as.character(dots_names$...))
      }
      else {
        namelist = as.character(dots_names$...)
      }
      if (!input@trainedModel %in% namelist){
        result = askYesNo(paste0("`",namelist[i],"` is the outcome of testing a model on new data, but the original model (`",input@trainedModel,"`) has not been included. Are you sure you want to continue without including it?"))
        if (is.na(result)) {
          stop("Function aborted.")
        }
        if (!result) {
          stop("Function aborted.")
        }
      }
    }
  }
  # Assemble long-format plotting data across all models:
  #   model_labels     - one row per model: name, AUC and its CI bounds
  #   roc_plot_data    - ROC curve points (specificity vs sensitivity)
  #   roc_ci_plot_data - CI band around the curve (2.5/50/97.5 percentiles)
  model_labels = data.frame(matrix(ncol=4,nrow=0))
  colnames(model_labels) = c("name", "auc", "ci_lower", "ci_upper")
  roc_plot_data = data.frame(matrix(ncol=2,nrow=0))
  colnames(roc_plot_data) = c("specificities", "sensitivities")
  roc_ci_plot_data = data.frame(matrix(ncol=4, nrow=0))
  colnames(roc_ci_plot_data) = c("percent2p5", "percent50", "percent97p5", "sensitivities")
  roc_list = list()
  for (i in 1:length(dots)) {
    input = dots[[i]]
    # Each model object carries its ROC (@roc) and ROC CI (@roc_ci) slots.
    roc_data = input@roc
    roc_list[[as.character(dots_names$...[[i]])]] = roc_data
    roc_ci_data = input@roc_ci
    roc_data_formatted = data.frame(specificities=roc_data$specificities, sensitivities=roc_data$sensitivities)
    roc_data_formatted$model = as.character(dots_names$...[[i]])
    roc_ci_data_formatted = as.data.frame(roc_ci_data)
    # Row names of the CI matrix are the sensitivity grid points.
    roc_ci_data_formatted$sensitivities = as.numeric(rownames(roc_ci_data_formatted))
    colnames(roc_ci_data_formatted) = c("percent2p5", "percent50", "percent97p5", "sensitivities")
    roc_ci_data_formatted$model = as.character(dots_names$...[[i]])
    roc_plot_data = rbind(roc_plot_data, roc_data_formatted)
    roc_ci_plot_data = rbind(roc_ci_plot_data, roc_ci_data_formatted)
    temp_frame = data.frame(name=as.character(dots_names$...[[i]]), auc = roc_data$auc, ci_lower = roc_data$ci[1], ci_upper = roc_data$ci[3])
    model_labels = rbind(model_labels, temp_frame)
  }
  # Fix factor levels so colors/facets follow the order models were passed in.
  roc_plot_data$model = factor(roc_plot_data$model, levels = model_labels$name)
  roc_ci_plot_data$model = factor(roc_ci_plot_data$model, levels = model_labels$name)
  # The shaded AUC polygon on shared plots uses the lowest-AUC model.
  lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
  # Unit square outline drawn around every ROC panel.
  box_df = data.frame(polygon.x = c(0,0,1,1,0), polygon.y = c(0,1,1,0,0))
  # Closed polygon under each model's ROC curve (curve points plus the three
  # corner points needed to close the shape for geom_polygon).
  auc_df = data.frame(matrix(ncol=3,nrow=0))
  colnames(auc_df) = c("sensitivities", "specificities", "model")
  for (j in 1:nrow(model_labels)) {
    auc_df_add = subset(roc_plot_data, model == model_labels$name[j])
    auc_extra = data.frame(sensitivities = c(1,0,0), specificities = c(0,0,1), model=rep(model_labels$name[j],3))
    auc_df = rbind(auc_df, auc_df_add, auc_extra)
  }
  # Printable "AUC: ... CI: (...)" labels, stacked vertically per model.
  auc_labels = data.frame(matrix(ncol=4,nrow=0))
  colnames(auc_labels) = c("model", "label_text", "x", "y")
  for (j in 1:nrow(model_labels)) {
    model = model_labels$name[j]
    label_text = paste("AUC: ",round(model_labels$auc[j],3), "\nCI: (", round(model_labels$ci_lower[j],3), "-", round(model_labels$ci_upper[j],3),")",sep="")
    temp_frame = data.frame(model=model, label_text=label_text, x=.4, y=.6-(.1*j))
    auc_labels = rbind(auc_labels, temp_frame)
  }
  # Duplicate the data with an extra "all" facet level so the faceted plot can
  # include a summary panel. CreateAllFacet is a package-internal helper
  # (defined elsewhere in this package).
  roc_plot_data_all = CreateAllFacet(roc_plot_data, "model")
  roc_ci_plot_data_all = CreateAllFacet(roc_ci_plot_data, "model")
  auc_df$facet = auc_df$model
  # The "all" panel shades the AUC of the lowest-AUC model only.
  auc_df_add = subset(roc_plot_data, model == lowest_auc_model)
  auc_df_add$facet = "all"
  auc_extra = data.frame(sensitivities = c(1,0,0), specificities = c(0,0,1), model=rep(model_labels$name[j],3), facet=rep("all",3))
  auc_df = rbind(auc_df, auc_df_add, auc_extra)
  roc_plot_data_all$facet = factor(roc_plot_data_all$facet, levels = c(model_labels$name, "all"))
  roc_ci_plot_data_all$facet = factor(roc_ci_plot_data_all$facet, levels = c(model_labels$name, "all"))
  auc_df$facet = factor(auc_df$facet, levels = c(model_labels$name, "all"))
  # Per-model facet panels show their own label at a common height; the "all"
  # panel keeps the vertically stacked labels.
  auc_labels$facet = "all"
  auc_labels_new = auc_labels
  auc_labels_new$facet = auc_labels_new$model
  auc_labels_new$y=max(auc_labels_new$y)
  auc_labels = rbind(auc_labels_new, auc_labels)
  auc_labels$facet = factor(auc_labels$facet, levels = c(model_labels$name, "all"))
  # Keep pristine copies; each plotting section subsets these independently.
  original_auc_polygon_df=auc_df
  original_roc_ci_df=roc_ci_plot_data_all
  original_roc_curve_df=roc_plot_data_all
  original_auc_ci_labels_df=auc_labels
  # With a single model the combined and faceted plots would be redundant.
  if (nrow(model_labels) == 1) {
    combined_plot=FALSE
    facet_plot=FALSE
  }
  if(!individual_plot & !combined_plot & !facet_plot) {
    stop("Given that all of the following arguments are FALSE, no plots will be printed: `individual_plot`, `combined_plot`, `facet_plot`")
  }
  box_df = data.frame(polygon.x = c(0,0,1,1,0), polygon.y = c(0,1,1,0,0))
  # ---- Individual plots: one standalone ROC panel per model ----------------
  if(individual_plot) {
    for (i in 1:nrow(model_labels)) {
      this_model = model_labels$name[i]
      # Use only the per-model rows (facet != "all") for this model.
      auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
      auc_polygon_df = subset(auc_polygon_df, model == this_model)
      roc_ci_df = subset(original_roc_ci_df, facet != "all")
      roc_ci_df = subset(roc_ci_df, model == this_model)
      roc_curve_df = subset(original_roc_curve_df, facet != "all")
      roc_curve_df = subset(roc_curve_df, model == this_model)
      auc_ci_labels_df = subset(original_auc_ci_labels_df, facet != "all")
      auc_ci_labels_df = subset(auc_ci_labels_df, model == this_model)
      lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
      p = ggplot() +
        geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
      if(plot_auc_polygon) {
        p = p + geom_polygon(data=auc_polygon_df, aes(x=specificities, y=sensitivities), alpha=.3)
      }
      if(!missing(colors)) {
        if (nrow(model_labels) != length(colors)) {
          stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
        }
        this_color = colors[i]
      }
      else {
        # Default to the same ggplot2 hue palette the combined plot would use.
        this_color = hue_pal()(nrow(model_labels))[i]
      }
      # NOTE(review): "polygon4"/"path4" look like custom legend key glyphs
      # presumably defined elsewhere in this package -- confirm.
      if(plot_ci) {
        if(!legend_spacing) {
          p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities), fill=this_color, color=this_color, alpha=.4, size=.4)
        }
        else {
          p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities), fill=this_color, color=this_color, alpha=.4, size=.4, key_glyph="polygon4")
        }
      }
      if(!legend_spacing) {
        p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities), color=this_color, size=line_size)
      }
      else {
        p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities), color=this_color, size=line_size, key_glyph="path4")
      }
      # Chance-level diagonal.
      p = p + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
      # Optional caller overrides of where the AUC/CI text is placed; on
      # individual plots only the i-th (or single) coordinate is used.
      if(!missing(print_auc_ci_x)) {
        if (!print_auc & !print_ci) {
          warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
        }
        if (length(print_auc_ci_x) > 1) {
          auc_ci_labels_df$x = print_auc_ci_x[1]
        }
        else {
          auc_ci_labels_df$x = print_auc_ci_x
        }
      }
      if(!missing(print_auc_ci_y)) {
        if (!print_auc & !print_ci) {
          warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
        }
        if (length(print_auc_ci_y) > 1) {
          auc_ci_labels_df$y = print_auc_ci_y[1]
        }
        else {
          auc_ci_labels_df$y = print_auc_ci_y
        }
      }
      # Print AUC and/or CI. The label text is "AUC: ...\nCI: (...)"; when only
      # one of the two is wanted, split on the newline and keep that half.
      if(print_auc & print_ci) {
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      if (!print_ci & print_auc) {
        temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
        n <- length(temp_obj)
        auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      if (!print_auc & print_ci) {
        temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
        n <- length(temp_obj)
        auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      # Individual plots never show a legend.
      p = p +
        scale_x_reverse() +
        labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
        theme_bw() +
        coord_fixed() +
        theme(legend.position = "none")
      # Title priority: plot_title > model_names > the raw argument name.
      if(!missing(plot_title)) {
        if (nrow(model_labels) != length(plot_title)) {
          stop("If `individual_plots` is TRUE, the length of the argument `plot_title` must be equal to the number of models in your assessment.")
        }
        p = p + ggtitle(plot_title[i])
      }
      else if(!missing(model_names)) {
        if (nrow(model_labels) != length(model_names)) {
          stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
        }
        p = p + ggtitle(model_names[i])
      }
      else {
        p = p + ggtitle(model_labels$name[i])
      }
      if(legend_spacing) {
        p = p + theme(legend.key = element_rect(color = NA, fill = NA),
                      legend.key.size = unit(1.2, "cm"))
      }
      plot(p)
    }
  }
  # ---- Combined plot: all ROC curves overlaid on one panel -----------------
  if(combined_plot) {
    auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
    roc_ci_df = subset(original_roc_ci_df, facet != "all")
    roc_curve_df = subset(original_roc_curve_df, facet != "all")
    # Use the stacked "all" labels so each model's AUC/CI text gets its own row.
    auc_ci_labels_df = subset(original_auc_ci_labels_df, facet == "all")
    lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
    p = ggplot() +
      geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
    if(plot_auc_polygon) {
      # Shade only the smallest AUC so the polygon fits under every curve.
      p = p + geom_polygon(data=subset(auc_polygon_df, model == lowest_auc_model), aes(x=specificities, y=sensitivities), alpha=.3)
    }
    if(plot_ci) {
      if(!legend_spacing) {
        p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4)
      }
      else {
        p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4, key_glyph="polygon4")
      }
    }
    if(!legend_spacing) {
      p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size)
    }
    else {
      p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size, key_glyph="path4")
    }
    p = p + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
    # Here the label-position overrides must supply one value per model.
    if(!missing(print_auc_ci_x)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
      }
      if (nrow(model_labels) != length(print_auc_ci_x)) {
        stop("The length of the argument `print_auc_ci_x` must be equal to the number of models in your assessment.")
      }
      auc_ci_labels_df$x = print_auc_ci_x
    }
    if(!missing(print_auc_ci_y)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
      }
      if (nrow(model_labels) != length(print_auc_ci_y)) {
        stop("The length of the argument `print_auc_ci_y` must be equal to the number of models in your assessment.")
      }
      auc_ci_labels_df$y = print_auc_ci_y
    }
    # Same AUC/CI label logic as the individual plots, colored by model.
    if(print_auc & print_ci) {
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_ci & print_auc) {
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_auc & print_ci) {
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    p = p +
      scale_x_reverse() +
      labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
      theme_bw() +
      coord_fixed()
    if(!plot_legend) {
      p = p + theme(legend.position = "none")
    }
    if(!missing(plot_title)) {
      p = p + ggtitle(plot_title)
    }
    # Apply custom colors and/or display names to both fill and color scales.
    if(!missing(colors) & !missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_manual(values=colors, labels=model_names)
      p = p + scale_color_manual(values=colors, labels=model_names)
    }
    if(!missing(colors) & missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_manual(values=colors)
      p = p + scale_color_manual(values=colors)
    }
    if(!missing(model_names) & missing(colors)) {
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_discrete(labels=model_names)
      p = p + scale_color_discrete(labels=model_names)
    }
    if(legend_spacing) {
      p = p + theme(legend.key = element_rect(color = NA, fill = NA),
                    legend.key.size = unit(1.2, "cm"))
    }
    plot(p)
  }
  # ---- Faceted plot: one panel per model, optionally plus an "all" panel ---
  if(facet_plot) {
    if (facet_summary) {
      # Include the "all" summary panel data.
      auc_polygon_df = original_auc_polygon_df
      roc_ci_df = original_roc_ci_df
      roc_curve_df = original_roc_curve_df
      auc_ci_labels_df = original_auc_ci_labels_df
    }
    else {
      auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
      roc_ci_df = subset(original_roc_ci_df, facet != "all")
      roc_curve_df = subset(original_roc_curve_df, facet != "all")
      auc_ci_labels_df = subset(original_auc_ci_labels_df, facet != "all")
    }
    lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
    q = ggplot(data=roc_curve_df) +
      geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
    if(plot_auc_polygon) {
      q = q + geom_polygon(data=auc_polygon_df, aes(x=specificities, y=sensitivities), alpha=.3)
    }
    if(plot_ci) {
      if(!legend_spacing) {
        q = q + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4)
      }
      else {
        q = q + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4, key_glyph="polygon4")
      }
    }
    if(!legend_spacing) {
      q = q + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size)
    }
    else {
      q = q + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size, key_glyph="path4")
    }
    q = q + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
    # Position overrides: per-model values only make sense with the summary
    # panel; otherwise only a single value is meaningful.
    if(!missing(print_auc_ci_x)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
      }
      if (facet_summary & nrow(model_labels) != length(print_auc_ci_x)) {
        stop("The length of the argument `print_auc_ci_x` must be equal to the number of models in your assessment.")
      }
      else if (!facet_summary & length(print_auc_ci_x) != 1) {
        warning("When `facet_summary` is FALSE, only the first value of `print_auc_ci_x` will be used for faceted graphs.")
      }
      auc_ci_labels_df$x = print_auc_ci_x
    }
    if(!missing(print_auc_ci_y)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
      }
      if (facet_summary & nrow(model_labels) != length(print_auc_ci_y)) {
        stop("The length of the argument `print_auc_ci_y` must be equal to the number of models in your assessment.")
      }
      else if (!facet_summary & length(print_auc_ci_y) != 1) {
        warning("When `facet_summary` is FALSE, only the first value of `print_auc_ci_y` will be used for faceted graphs.")
      }
      auc_ci_labels_df$y = print_auc_ci_y
    }
    if(print_auc & print_ci) {
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_ci & print_auc) {
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_auc & print_ci) {
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    q = q +
      facet_wrap(~facet, ncol=facet_n_col, nrow=facet_n_row) +
      # facet_wrap(~facet) +
      scale_x_reverse() +
      labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
      theme_bw() +
      coord_fixed()
    if(!plot_legend) {
      q = q + theme(legend.position = "none")
    }
    if(!missing(plot_title)) {
      q = q + ggtitle(plot_title)
    }
    if(!missing(colors) & !missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_manual(values=colors, labels=model_names)
      q = q + scale_color_manual(values=colors, labels=model_names)
    }
    if(!missing(colors) & missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_manual(values=colors)
      q = q + scale_color_manual(values=colors)
    }
    if(!missing(model_names) & missing(colors)) {
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_discrete(labels=model_names)
      q = q + scale_color_discrete(labels=model_names)
    }
    if(legend_spacing) {
      q = q + theme(legend.key = element_rect(color = NA, fill = NA),
                    legend.key.size = unit(1.2, "cm"))
    }
    plot(q)
  }
}
| /R/plot_roc.R | no_license | nlanderson9/languagePredictR | R | false | false | 24,650 | r | #' @title Plot ROC curves
#'
#' @description This function plots ROC curves from the results of the \code{\link{assess_models}} function
#'
#' @param ... Output(s) of the \code{\link{language_model}}, \code{\link{comparison_model}}, or \code{\link{test_language_model}} functions
#' @param individual_plot If TRUE, graphs individual ROC curves for each model. Defaults to TRUE.
#' @param combined_plot If TRUE, and modelAssessment contains multiple models, graphs a plot with all ROC curves overlapping. Defaults to TRUE.
#' @param facet_plot If TRUE, and modelAssessment contains multiple models, graphs a faceted plot with all ROC curves included. Defaults to TRUE.
#' @param facet_summary If TRUE, and modelAssessment contains multiple models, the facet_plot will include a plot with all ROC curves overlapping. Defaults to TRUE.
#' @param colors A vector of colors to use for each model's ROC curve.
#' @param model_names A vector of strings to use as titles/names for each model.
#' @param plot_auc_polygon If TRUE, the area below with ROC curve with the lowest AUC will be shaded in. Defaults to TRUE.
#' @param plot_ci If TRUE, a confidence band will be plotted around each ROC curve. Defaults to TRUE.
#' @param line_size A numeric representing the width of the ROC curve line. Defaults to 1.
#' @param print_auc If TRUE, the value of the AUC will be printed on the plot. Defaults to TRUE.
#' @param print_ci If TRUE, the range of the confidence interval will be printed on the plot. Defaults to TRUE.
#' @param print_auc_ci_font_size The font size for printed values for the AUC and confidence interval. Defaults to 4.
#' @param print_auc_ci_x A vector of x (horizontal) positions determining where on the plot the AUC and confidence interval values will be printed.
#' @param print_auc_ci_y A vector of y (vertical) positions determining where on the plot the AUC and confidence interval values will be printed.
#' @param plot_legend If TRUE, a legend will be printed on all plots.
#' @param plot_title The title of the plot
#' @param facet_n_row The number of rows used to plot the facet_plot. Defaults to NULL.
#' @param facet_n_col The number of columns used to plot the facet_plot. Defaults to 2.
#' @param legend_spacing If TRUE, there will be spacing between the legend items. Defaults to FALSE.
#'
#' @return Nothing (this function plots a series of graphs)
#'
#' @seealso \code{\link{language_model}}, \code{\link{comparison_model}}, \code{\link{test_language_model}}
#'
#' @import ggplot2 scales
#' @importFrom rlang .data
#'
#' @export
#'
#' @examples
#'
#' \dontrun{
#' strong_movie_review_data$cleanText = clean_text(strong_movie_review_data$text)
#' mild_movie_review_data$cleanText = clean_text(mild_movie_review_data$text)
#'
#' # Using language to predict "Positive" vs. "Negative" reviews
#' # Only for strong reviews (ratings of 1 or 10)
#' movie_model_strong = language_model(strong_movie_review_data,
#' outcome = "valence",
#' outcomeType = "binary",
#' text = "cleanText",
#' progressBar = FALSE)
#'
#' # Using language to predict "Positive" vs. "Negative" reviews
#' # Only for mild reviews (ratings of 4 or 7)
#' movie_model_mild = language_model(mild_movie_review_data,
#' outcome = "valence",
#' outcomeType = "binary",
#' text = "cleanText",
#' progressBar = FALSE)
#'
#'
#' # Plot ROC curves
#' plot_roc(movie_model_strong, movie_model_mild)
#' }
plot_roc = function(..., individual_plot=TRUE, combined_plot=TRUE, facet_plot=TRUE, facet_summary=TRUE, colors, model_names, plot_auc_polygon=TRUE, plot_ci=TRUE, line_size=1, print_auc=TRUE, print_ci=TRUE, print_auc_ci_font_size=4, print_auc_ci_x, print_auc_ci_y, plot_legend=TRUE, plot_title, facet_n_row=NULL, facet_n_col=2, legend_spacing=FALSE) {
  # Bind the names used inside ggplot2 aes() calls (non-standard evaluation)
  # to NULL so R CMD check does not report them as undefined globals.
  facet=model=polygon.x=polygon.y=specificities=sensitivities=percent2p5=percent97p5=label_text=x=y=NULL

  # ---- Validate every model object passed through `...` ----
  dots = list(...)
  # match.call() keeps the unevaluated argument expressions so the caller's
  # variable names can be used as model labels.
  dots_names = match.call(expand.dots = FALSE)
  for (i in 1:length(dots)) {
    input = dots[[i]]
    # Only model objects produced by this package are accepted.
    if (!(class(input) %in% c("langModel", "compModel", "testAssessment"))) {
      stop(paste0("Your argument '", as.character(dots_names$...[[i]]), "'must be a model generated by either the `language_model` or 'comparison_model` functions."))
    }
    # ROC curves only exist for binary outcomes.
    if (input@type != "binary") {
      stop(paste0("ROCs can only be plotted for models with a binary outcome variable (`",as.character(dots_names$...[[i]]),"` does not have a binary outcome)."))
    }
    if (class(input) == "testAssessment") {
      # A test assessment comes from a previously trained model; prompt the
      # user (interactively) if that original model was not also supplied.
      if (!is.vector(as.character(dots_names$...))) {
        namelist = c(as.character(dots_names$...))
      }
      else {
        namelist = as.character(dots_names$...)
      }
      if (!input@trainedModel %in% namelist){
        result = askYesNo(paste0("`",namelist[i],"` is the outcome of testing a model on new data, but the original model (`",input@trainedModel,"`) has not been included. Are you sure you want to continue without including it?"))
        if (is.na(result)) {
          stop("Function aborted.")
        }
        if (!result) {
          stop("Function aborted.")
        }
      }
    }
  }

  # ---- Gather ROC curves, confidence bands, and AUC/CI summaries ----
  # model_labels: one row per model (name, AUC, CI bounds).
  model_labels = data.frame(matrix(ncol=4,nrow=0))
  colnames(model_labels) = c("name", "auc", "ci_lower", "ci_upper")
  # roc_plot_data: long-format (specificity, sensitivity) points per model.
  roc_plot_data = data.frame(matrix(ncol=2,nrow=0))
  colnames(roc_plot_data) = c("specificities", "sensitivities")
  # roc_ci_plot_data: confidence-band quantiles per sensitivity value.
  roc_ci_plot_data = data.frame(matrix(ncol=4, nrow=0))
  colnames(roc_ci_plot_data) = c("percent2p5", "percent50", "percent97p5", "sensitivities")
  roc_list = list()
  for (i in 1:length(dots)) {
    input = dots[[i]]
    roc_data = input@roc
    roc_list[[as.character(dots_names$...[[i]])]] = roc_data
    roc_ci_data = input@roc_ci
    roc_data_formatted = data.frame(specificities=roc_data$specificities, sensitivities=roc_data$sensitivities)
    roc_data_formatted$model = as.character(dots_names$...[[i]])
    roc_ci_data_formatted = as.data.frame(roc_ci_data)
    # Row names of the CI matrix carry the sensitivity grid values.
    roc_ci_data_formatted$sensitivities = as.numeric(rownames(roc_ci_data_formatted))
    colnames(roc_ci_data_formatted) = c("percent2p5", "percent50", "percent97p5", "sensitivities")
    roc_ci_data_formatted$model = as.character(dots_names$...[[i]])
    roc_plot_data = rbind(roc_plot_data, roc_data_formatted)
    roc_ci_plot_data = rbind(roc_ci_plot_data, roc_ci_data_formatted)
    # ci[1] / ci[3] are the lower / upper bounds of the AUC confidence interval.
    temp_frame = data.frame(name=as.character(dots_names$...[[i]]), auc = roc_data$auc, ci_lower = roc_data$ci[1], ci_upper = roc_data$ci[3])
    model_labels = rbind(model_labels, temp_frame)
  }
  # Preserve the order in which the models were supplied.
  roc_plot_data$model = factor(roc_plot_data$model, levels = model_labels$name)
  roc_ci_plot_data$model = factor(roc_ci_plot_data$model, levels = model_labels$name)
  lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
  # Unit-square outline drawn behind every ROC curve.
  box_df = data.frame(polygon.x = c(0,0,1,1,0), polygon.y = c(0,1,1,0,0))
  # auc_df: closed polygons used to shade the area under each curve.
  auc_df = data.frame(matrix(ncol=3,nrow=0))
  colnames(auc_df) = c("sensitivities", "specificities", "model")
  for (j in 1:nrow(model_labels)) {
    auc_df_add = subset(roc_plot_data, model == model_labels$name[j])
    # Extra corner points close each polygon along the axes.
    auc_extra = data.frame(sensitivities = c(1,0,0), specificities = c(0,0,1), model=rep(model_labels$name[j],3))
    auc_df = rbind(auc_df, auc_df_add, auc_extra)
  }
  # auc_labels: "AUC: ...\nCI: (...)" annotation text, stacked vertically.
  auc_labels = data.frame(matrix(ncol=4,nrow=0))
  colnames(auc_labels) = c("model", "label_text", "x", "y")
  for (j in 1:nrow(model_labels)) {
    model = model_labels$name[j]
    label_text = paste("AUC: ",round(model_labels$auc[j],3), "\nCI: (", round(model_labels$ci_lower[j],3), "-", round(model_labels$ci_upper[j],3),")",sep="")
    temp_frame = data.frame(model=model, label_text=label_text, x=.4, y=.6-(.1*j))
    auc_labels = rbind(auc_labels, temp_frame)
  }

  # ---- Duplicate the per-model data with an "all" facet ----
  # The faceted display can include a summary panel showing every curve.
  roc_plot_data_all = CreateAllFacet(roc_plot_data, "model")
  roc_ci_plot_data_all = CreateAllFacet(roc_ci_plot_data, "model")
  auc_df$facet = auc_df$model
  # In the "all" panel only the lowest-AUC polygon is shaded so it stays
  # visible beneath every curve.
  auc_df_add = subset(roc_plot_data, model == lowest_auc_model)
  auc_df_add$facet = "all"
  # NOTE(review): `j` here holds the final value left over from the loop
  # above; presumably this was meant to match `lowest_auc_model` -- confirm.
  auc_extra = data.frame(sensitivities = c(1,0,0), specificities = c(0,0,1), model=rep(model_labels$name[j],3), facet=rep("all",3))
  auc_df = rbind(auc_df, auc_df_add, auc_extra)
  roc_plot_data_all$facet = factor(roc_plot_data_all$facet, levels = c(model_labels$name, "all"))
  roc_ci_plot_data_all$facet = factor(roc_ci_plot_data_all$facet, levels = c(model_labels$name, "all"))
  auc_df$facet = factor(auc_df$facet, levels = c(model_labels$name, "all"))
  auc_labels$facet = "all"
  # Per-model facets print their label at the same (top) height.
  auc_labels_new = auc_labels
  auc_labels_new$facet = auc_labels_new$model
  auc_labels_new$y=max(auc_labels_new$y)
  auc_labels = rbind(auc_labels_new, auc_labels)
  auc_labels$facet = factor(auc_labels$facet, levels = c(model_labels$name, "all"))
  # Keep pristine copies; the plotting sections below subset these.
  original_auc_polygon_df=auc_df
  original_roc_ci_df=roc_ci_plot_data_all
  original_roc_curve_df=roc_plot_data_all
  original_auc_ci_labels_df=auc_labels
  # With a single model there is nothing to combine or facet.
  if (nrow(model_labels) == 1) {
    combined_plot=FALSE
    facet_plot=FALSE
  }
  if(!individual_plot & !combined_plot & !facet_plot) {
    stop("Given that all of the following arguments are FALSE, no plots will be printed: `individual_plot`, `combined_plot`, `facet_plot`")
  }
  box_df = data.frame(polygon.x = c(0,0,1,1,0), polygon.y = c(0,1,1,0,0))

  # ---- One standalone plot per model ----
  if(individual_plot) {
    for (i in 1:nrow(model_labels)) {
      this_model = model_labels$name[i]
      auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
      auc_polygon_df = subset(auc_polygon_df, model == this_model)
      roc_ci_df = subset(original_roc_ci_df, facet != "all")
      roc_ci_df = subset(roc_ci_df, model == this_model)
      roc_curve_df = subset(original_roc_curve_df, facet != "all")
      roc_curve_df = subset(roc_curve_df, model == this_model)
      auc_ci_labels_df = subset(original_auc_ci_labels_df, facet != "all")
      auc_ci_labels_df = subset(auc_ci_labels_df, model == this_model)
      lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
      p = ggplot() +
        geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
      if(plot_auc_polygon) {
        p = p + geom_polygon(data=auc_polygon_df, aes(x=specificities, y=sensitivities), alpha=.3)
      }
      if(!missing(colors)) {
        if (nrow(model_labels) != length(colors)) {
          stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
        }
        this_color = colors[i]
      }
      else {
        # Default to the ggplot2 hue palette so individual plots match the
        # colors used in the combined/faceted plots.
        this_color = hue_pal()(nrow(model_labels))[i]
      }
      if(plot_ci) {
        if(!legend_spacing) {
          p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities), fill=this_color, color=this_color, alpha=.4, size=.4)
        }
        else {
          # The "polygon4"/"path4" key glyphs add padding between legend keys.
          p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities), fill=this_color, color=this_color, alpha=.4, size=.4, key_glyph="polygon4")
        }
      }
      if(!legend_spacing) {
        p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities), color=this_color, size=line_size)
      }
      else {
        p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities), color=this_color, size=line_size, key_glyph="path4")
      }
      # Diagonal chance line.
      p = p + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
      if(!missing(print_auc_ci_x)) {
        if (!print_auc & !print_ci) {
          warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
        }
        # An individual plot has a single label; use the first position only.
        if (length(print_auc_ci_x) > 1) {
          auc_ci_labels_df$x = print_auc_ci_x[1]
        }
        else {
          auc_ci_labels_df$x = print_auc_ci_x
        }
      }
      if(!missing(print_auc_ci_y)) {
        if (!print_auc & !print_ci) {
          warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
        }
        if (length(print_auc_ci_y) > 1) {
          auc_ci_labels_df$y = print_auc_ci_y[1]
        }
        else {
          auc_ci_labels_df$y = print_auc_ci_y
        }
      }
      if(print_auc & print_ci) {
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      if (!print_ci & print_auc) {
        # Label text is "AUC: ...\nCI: ..."; keep only the odd (AUC) lines.
        temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
        n <- length(temp_obj)
        auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      if (!print_auc & print_ci) {
        # Keep only the even (CI) lines of each label.
        temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
        n <- length(temp_obj)
        auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
        p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y), color=this_color, size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
      }
      p = p +
        scale_x_reverse() +
        labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
        theme_bw() +
        coord_fixed() +
        theme(legend.position = "none")
      if(!missing(plot_title)) {
        if (nrow(model_labels) != length(plot_title)) {
          stop("If `individual_plots` is TRUE, the length of the argument `plot_title` must be equal to the number of models in your assessment.")
        }
        p = p + ggtitle(plot_title[i])
      }
      else if(!missing(model_names)) {
        if (nrow(model_labels) != length(model_names)) {
          stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
        }
        p = p + ggtitle(model_names[i])
      }
      else {
        p = p + ggtitle(model_labels$name[i])
      }
      if(legend_spacing) {
        p = p + theme(legend.key = element_rect(color = NA, fill = NA),
                      legend.key.size = unit(1.2, "cm"))
      }
      plot(p)
    }
  }

  # ---- Single plot with all models overlaid ----
  if(combined_plot) {
    auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
    roc_ci_df = subset(original_roc_ci_df, facet != "all")
    roc_curve_df = subset(original_roc_curve_df, facet != "all")
    # The combined plot uses the stacked "all"-facet labels.
    auc_ci_labels_df = subset(original_auc_ci_labels_df, facet == "all")
    lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
    p = ggplot() +
      geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
    if(plot_auc_polygon) {
      # Shade only the smallest AUC so the shading is visible under all curves.
      p = p + geom_polygon(data=subset(auc_polygon_df, model == lowest_auc_model), aes(x=specificities, y=sensitivities), alpha=.3)
    }
    if(plot_ci) {
      if(!legend_spacing) {
        p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4)
      }
      else {
        p = p + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4, key_glyph="polygon4")
      }
    }
    if(!legend_spacing) {
      p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size)
    }
    else {
      p = p + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size, key_glyph="path4")
    }
    p = p + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
    if(!missing(print_auc_ci_x)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
      }
      # One label per model: the position vector must match the model count.
      if (nrow(model_labels) != length(print_auc_ci_x)) {
        stop("The length of the argument `print_auc_ci_x` must be equal to the number of models in your assessment.")
      }
      auc_ci_labels_df$x = print_auc_ci_x
    }
    if(!missing(print_auc_ci_y)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
      }
      if (nrow(model_labels) != length(print_auc_ci_y)) {
        stop("The length of the argument `print_auc_ci_y` must be equal to the number of models in your assessment.")
      }
      auc_ci_labels_df$y = print_auc_ci_y
    }
    if(print_auc & print_ci) {
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_ci & print_auc) {
      # Keep only the AUC (odd) lines of each "AUC\nCI" label.
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_auc & print_ci) {
      # Keep only the CI (even) lines of each label.
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
      p = p + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    p = p +
      scale_x_reverse() +
      labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
      theme_bw() +
      coord_fixed()
    if(!plot_legend) {
      p = p + theme(legend.position = "none")
    }
    if(!missing(plot_title)) {
      p = p + ggtitle(plot_title)
    }
    if(!missing(colors) & !missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_manual(values=colors, labels=model_names)
      p = p + scale_color_manual(values=colors, labels=model_names)
    }
    if(!missing(colors) & missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_manual(values=colors)
      p = p + scale_color_manual(values=colors)
    }
    if(!missing(model_names) & missing(colors)) {
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      p = p + scale_fill_discrete(labels=model_names)
      p = p + scale_color_discrete(labels=model_names)
    }
    if(legend_spacing) {
      p = p + theme(legend.key = element_rect(color = NA, fill = NA),
                    legend.key.size = unit(1.2, "cm"))
    }
    plot(p)
  }

  # ---- Faceted plot: one panel per model, optionally plus an "all" panel ----
  if(facet_plot) {
    if (facet_summary) {
      auc_polygon_df = original_auc_polygon_df
      roc_ci_df = original_roc_ci_df
      roc_curve_df = original_roc_curve_df
      auc_ci_labels_df = original_auc_ci_labels_df
    }
    else {
      # Drop the "all" summary panel data.
      auc_polygon_df = subset(original_auc_polygon_df, facet != "all")
      roc_ci_df = subset(original_roc_ci_df, facet != "all")
      roc_curve_df = subset(original_roc_curve_df, facet != "all")
      auc_ci_labels_df = subset(original_auc_ci_labels_df, facet != "all")
    }
    lowest_auc_model = model_labels$name[which.min(model_labels$auc)]
    q = ggplot(data=roc_curve_df) +
      geom_path(data=box_df, aes(x=polygon.x, y=polygon.y))
    if(plot_auc_polygon) {
      q = q + geom_polygon(data=auc_polygon_df, aes(x=specificities, y=sensitivities), alpha=.3)
    }
    if(plot_ci) {
      if(!legend_spacing) {
        q = q + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4)
      }
      else {
        q = q + geom_ribbon(data=roc_ci_df, aes(xmin=percent2p5, xmax=percent97p5, y=sensitivities, fill=model, color=model), alpha=.4, size=.4, key_glyph="polygon4")
      }
    }
    if(!legend_spacing) {
      q = q + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size)
    }
    else {
      q = q + geom_line(data=roc_curve_df, aes(x=specificities, y=sensitivities, color=model), size=line_size, key_glyph="path4")
    }
    q = q + geom_segment(aes(x=1, xend=0, y=0, yend=1), linetype="dashed")
    if(!missing(print_auc_ci_x)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_x` will not be used if the AUC and CI values are not being printed.")
      }
      if (facet_summary & nrow(model_labels) != length(print_auc_ci_x)) {
        stop("The length of the argument `print_auc_ci_x` must be equal to the number of models in your assessment.")
      }
      else if (!facet_summary & length(print_auc_ci_x) != 1) {
        warning("When `facet_summary` is FALSE, only the first value of `print_auc_ci_x` will be used for faceted graphs.")
      }
      auc_ci_labels_df$x = print_auc_ci_x
    }
    if(!missing(print_auc_ci_y)) {
      if (!print_auc & !print_ci) {
        warning("The argument `print_auc_ci_y` will not be used if the AUC and CI values are not being printed.")
      }
      if (facet_summary & nrow(model_labels) != length(print_auc_ci_y)) {
        stop("The length of the argument `print_auc_ci_y` must be equal to the number of models in your assessment.")
      }
      else if (!facet_summary & length(print_auc_ci_y) != 1) {
        warning("When `facet_summary` is FALSE, only the first value of `print_auc_ci_y` will be used for faceted graphs.")
      }
      auc_ci_labels_df$y = print_auc_ci_y
    }
    if(print_auc & print_ci) {
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_ci & print_auc) {
      # Keep only the AUC (odd) lines of each "AUC\nCI" label.
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(temp_obj[seq(n) %% 2 == 1])
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    if (!print_auc & print_ci) {
      # Keep only the CI (even) lines of each label.
      temp_obj = unlist(strsplit(auc_ci_labels_df$label_text, split="\n"))
      n <- length(temp_obj)
      auc_ci_labels_df$label_text = c(rev(temp_obj[seq(n) %% 2 == 0]))
      q = q + geom_text(data=auc_ci_labels_df, aes(label=label_text, x=x, y=y, color=model), size=print_auc_ci_font_size, hjust=0, show.legend = FALSE)
    }
    q = q +
      facet_wrap(~facet, ncol=facet_n_col, nrow=facet_n_row) +
      # facet_wrap(~facet) +
      scale_x_reverse() +
      labs(x="Sensitivity", y="Specificity", color="Model", fill="Model") +
      theme_bw() +
      coord_fixed()
    if(!plot_legend) {
      q = q + theme(legend.position = "none")
    }
    if(!missing(plot_title)) {
      q = q + ggtitle(plot_title)
    }
    if(!missing(colors) & !missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_manual(values=colors, labels=model_names)
      q = q + scale_color_manual(values=colors, labels=model_names)
    }
    if(!missing(colors) & missing(model_names)) {
      if (nrow(model_labels) != length(colors)) {
        stop("The length of the argument `colors` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_manual(values=colors)
      q = q + scale_color_manual(values=colors)
    }
    if(!missing(model_names) & missing(colors)) {
      if (nrow(model_labels) != length(model_names)) {
        stop("The length of the argument `model_names` must be equal to the number of models in your assessment.")
      }
      q = q + scale_fill_discrete(labels=model_names)
      q = q + scale_color_discrete(labels=model_names)
    }
    if(legend_spacing) {
      q = q + theme(legend.key = element_rect(color = NA, fill = NA),
                    legend.key.size = unit(1.2, "cm"))
    }
    plot(q)
  }
}
|
#' Calculates the comemberships of all pairs of a vector of clustering labels.
#'
#' For a set of clustering labels, this function computes the comembership of all
#' pairs of observations. Basically, two observations are said to be comembers if
#' they are clustered together.
#'
#' Tibshirani and Walther (2005) use the term 'co-membership', which we shorten
#' to 'comembership'. Some authors instead use the terms 'connectivity' or
#' 'co-occurrence'.
#'
#' We use the \code{Rcpp} package to improve the runtime speed of this function.
#'
#' @export
#' @param labels a vector of \code{n} clustering labels
#' @return a vector of \code{choose(n, 2)} comembership bits
#' @references Tibshirani, R. and Walther, G. (2005), Cluster Validation by
#' Prediction Strength, _Journal of Computational and Graphical Statistics_, 14,
#' 3, 511-528.
#' \url{http://amstat.tandfonline.com/doi/abs/10.1198/106186005X59243}.
#' @examples
#' # We generate K = 3 labels for each of n = 10 observations and compute the
#' # comembership for all 'n choose 2' pairs.
#' set.seed(42)
#' K <- 3
#' n <- 10
#' labels <- sample.int(K, n, replace = TRUE)
#' comembership_out <- comembership(labels)
#' comembership_out
#'
#' # Notice that the number of comemberships is 'n choose 2'.
#' length(comembership_out) == choose(n, 2)
comembership <- function(labels) {
  # Delegates to the compiled C++ routine, which returns one comembership
  # bit per pair of observations, i.e. a vector of length choose(n, 2).
  # NOTE(review): no input validation is performed here; presumably `labels`
  # must be an atomic vector the compiled code can handle -- confirm.
  .Call("rcpp_comembership", labels, PACKAGE = "clusteval")
}
#' Calculates the 2x2 contingency table of agreements and disagreements of
#' comemberships from two vectors of clustering labels.
#'
#' For two clusterings of the same data set, this function calculates the 2x2
#' contingency table of agreements and disagreements of the corresponding two
#' vectors of comemberships. Basically, the comembership is defined as the pairs
#' of observations that are clustered together.
#'
#' The contingency table calculated is typically utilized in the calculation of
#' a similarity statistic (e.g., Rand index, Jaccard index) between the two
#' clusterings. The 2x2 contingency table consists of the following four cells:
#' \describe{
#' \item{n_11}{the number of observation pairs where both observations are
#' comembers in both clusterings}
#' \item{n_10}{the number of observation pairs where the observations are
#' comembers in the first clustering but not the second}
#' \item{n_01}{the number of observation pairs where the observations are
#' comembers in the second clustering but not the first}
#' \item{n_00}{the number of observation pairs where neither pair are comembers
#' in either clustering}
#' }
#'
#' Tibshirani and Walther (2005) use the term 'co-membership', which we shorten
#' to 'comembership'. Some authors instead use the terms 'connectivity' or
#' 'co-occurrence'.
#'
#' We use the \code{Rcpp} package to improve the runtime speed of this function.
#'
#' @export
#' @param labels1 a vector of \code{n} clustering labels
#' @param labels2 a vector of \code{n} clustering labels
#' @return named list containing the calculated contingency table:
#' \itemize{
#' \item n_11
#' \item n_10
#' \item n_01
#' \item n_00
#' }
#' @references Tibshirani, R. and Walther, G. (2005). Cluster Validation by
#' Prediction Strength. Journal of Computational and Graphical Statistics, 14, 3,
#' 511-528. \url{http://amstat.tandfonline.com/doi/abs/10.1198/106186005X59243}.
#' @examples
#' # We generate K = 3 labels for each of n = 10 observations and compute the
#' # comembership for all 'n choose 2' pairs.
#' set.seed(42)
#' K <- 3
#' n <- 10
#' labels1 <- sample.int(K, n, replace = TRUE)
#' labels2 <- sample.int(K, n, replace = TRUE)
#' comembership_table(labels1, labels2)
#'
#' # Here, we cluster the \code{\link{iris}} data set with the K-means and
#' # hierarchical algorithms using the true number of clusters, K = 3.
#' # Then, we compute the 2x2 contingency table of agreements and disagreements
#' # of the comemberships.
#' iris_kmeans <- kmeans(iris[, -5], centers = 3)$cluster
#' iris_hclust <- cutree(hclust(dist(iris[, -5])), k = 3)
#' comembership_table(iris_kmeans, iris_hclust)
comembership_table <- function(labels1, labels2) {
  # Both clusterings must label the same set of observations.
  n_first <- length(labels1)
  n_second <- length(labels2)
  if (n_first != n_second) {
    stop("The two vectors of cluster labels must be of equal length.")
  }
  # The pairwise agreement/disagreement counts are tallied in compiled C++.
  .Call("rcpp_comembership_table", labels1, labels2, PACKAGE = "clusteval")
}
| /fuzzedpackages/clusteval/R/comembership.r | no_license | akhikolla/testpackages | R | false | false | 4,325 | r | #' Calculates the comemberships of all pairs of a vector of clustering labels.
#'
#' For a set of clustering labels, this function computes the comembership of all
#' pairs of observations. Basically, two observations are said to be comembers if
#' they are clustered together.
#'
#' Tibshirani and Walther (2005) use the term 'co-membership', which we shorten
#' to 'comembership'. Some authors instead use the terms 'connectivity' or
#' 'co-occurrence'.
#'
#' We use the \code{Rcpp} package to improve the runtime speed of this function.
#'
#' @export
#' @param labels a vector of \code{n} clustering labels
#' @return a vector of \code{choose(n, 2)} comembership bits
#' @references Tibshirani, R. and Walther, G. (2005), Cluster Validation by
#' Prediction Strength, _Journal of Computational and Graphical Statistics_, 14,
#' 3, 511-528.
#' \url{http://amstat.tandfonline.com/doi/abs/10.1198/106186005X59243}.
#' @examples
#' # We generate K = 3 labels for each of n = 10 observations and compute the
#' # comembership for all 'n choose 2' pairs.
#' set.seed(42)
#' K <- 3
#' n <- 10
#' labels <- sample.int(K, n, replace = TRUE)
#' comembership_out <- comembership(labels)
#' comembership_out
#'
#' # Notice that the number of comemberships is 'n choose 2'.
#' length(comembership_out) == choose(n, 2)
comembership <- function(labels) {
  # Delegates to the compiled C++ routine, which returns one comembership
  # bit per pair of observations, i.e. a vector of length choose(n, 2).
  # NOTE(review): no input validation is performed here; presumably `labels`
  # must be an atomic vector the compiled code can handle -- confirm.
  .Call("rcpp_comembership", labels, PACKAGE = "clusteval")
}
#' Calculates the 2x2 contingency table of agreements and disagreements of
#' comemberships from two vectors of clustering labels.
#'
#' For two clusterings of the same data set, this function calculates the 2x2
#' contingency table of agreements and disagreements of the corresponding two
#' vectors of comemberships. Basically, the comembership is defined as the pairs
#' of observations that are clustered together.
#'
#' The contingency table calculated is typically utilized in the calculation of
#' a similarity statistic (e.g., Rand index, Jaccard index) between the two
#' clusterings. The 2x2 contingency table consists of the following four cells:
#' \describe{
#' \item{n_11}{the number of observation pairs where both observations are
#' comembers in both clusterings}
#' \item{n_10}{the number of observation pairs where the observations are
#' comembers in the first clustering but not the second}
#' \item{n_01}{the number of observation pairs where the observations are
#' comembers in the second clustering but not the first}
#' \item{n_00}{the number of observation pairs where neither pair are comembers
#' in either clustering}
#' }
#'
#' Tibshirani and Walther (2005) use the term 'co-membership', which we shorten
#' to 'comembership'. Some authors instead use the terms 'connectivity' or
#' 'co-occurrence'.
#'
#' We use the \code{Rcpp} package to improve the runtime speed of this function.
#'
#' @export
#' @param labels1 a vector of \code{n} clustering labels
#' @param labels2 a vector of \code{n} clustering labels
#' @return named list containing the calculated contingency table:
#' \itemize{
#' \item n_11
#' \item n_10
#' \item n_01
#' \item n_00
#' }
#' @references Tibshirani, R. and Walther, G. (2005). Cluster Validation by
#' Prediction Strength. Journal of Computational and Graphical Statistics, 14, 3,
#' 511-528. \url{http://amstat.tandfonline.com/doi/abs/10.1198/106186005X59243}.
#' @examples
#' # We generate K = 3 labels for each of n = 10 observations and compute the
#' # comembership for all 'n choose 2' pairs.
#' set.seed(42)
#' K <- 3
#' n <- 10
#' labels1 <- sample.int(K, n, replace = TRUE)
#' labels2 <- sample.int(K, n, replace = TRUE)
#' comembership_table(labels1, labels2)
#'
#' # Here, we cluster the \code{\link{iris}} data set with the K-means and
#' # hierarchical algorithms using the true number of clusters, K = 3.
#' # Then, we compute the 2x2 contingency table of agreements and disagreements
#' # of the comemberships.
#' iris_kmeans <- kmeans(iris[, -5], centers = 3)$cluster
#' iris_hclust <- cutree(hclust(dist(iris[, -5])), k = 3)
#' comembership_table(iris_kmeans, iris_hclust)
comembership_table <- function(labels1, labels2) {
  # Both clusterings must label the same set of observations.
  n_first <- length(labels1)
  n_second <- length(labels2)
  if (n_first != n_second) {
    stop("The two vectors of cluster labels must be of equal length.")
  }
  # The pairwise agreement/disagreement counts are tallied in compiled C++.
  .Call("rcpp_comembership_table", labels1, labels2, PACKAGE = "clusteval")
}
|
#' Run demo shiny app
#'
#' \code{run_demo} runs demo shiny app which logs different types of events.
#' \code{run_demo_dashboard} runs demo shiny dashboard that allows
#' for interactive analysis of events from demo app.
#' The demo app can be also run in background and events fired in the app
#' can be seen immediately in the demo dashboard.
#'
#' @param in_background A logical.
#' If \code{TRUE} the demo shiny app is run in the background on port 5555.
#' Default is \code{FALSE}.
#'
#' @describeIn run_demo Run demo shiny app
#'
#' @import shiny
#' @importFrom utils browseURL
#'
#' @export
#' @examples
#' \donttest{
#' if (interactive()) {
#' run_demo(in_background = TRUE)
#' run_demo_dashboard()
#' }
#' }
run_demo <- function(in_background = FALSE) {
  # Locate the demo Shiny app shipped inside the installed package.
  app_path <- system.file("shiny", "demoapp", package = "shinyEventLogger")
  if (in_background) {
    # Launch a separate R process so the current session stays usable.
    # The port is hard-coded to 5555 (matches the message and @param docs).
    # NOTE(review): the `invisible` argument of system() is documented as
    # Windows-only -- confirm it is harmless on other platforms.
    system(
      paste0("R -e \"shiny::runApp('", app_path,
             "', port = 5555, display.mode = 'normal') \""),
      wait = FALSE,
      invisible = TRUE
    )
    message("The App is available on 127.0.0.1:5555")
    # Open the user's default browser at the background app's address.
    utils::browseURL("http://127.0.0.1:5555")
  } else {
    # Foreground mode: blocks the current R session until the app is closed.
    shiny::runApp(app_path, display.mode = "normal")
  }
} # end of run_demo
#' @describeIn run_demo Run demo shiny dashboard
#' @export
run_demo_dashboard <- function() {
  # Locate and launch the companion dashboard app in the foreground
  # (blocks the current R session until the app is closed).
  app_path <- system.file("shiny", "dashboardapp", package = "shinyEventLogger")
  shiny::runApp(app_path, display.mode = "normal")
} # end of run_demo_dashboard
| /R/run_demo.R | permissive | cran/shinyEventLogger | R | false | false | 1,553 | r | #' Run demo shiny app
#'
#' \code{run_demo} runs demo shiny app which logs different types of events.
#' \code{run_demo_dashboard} runs demo shiny dashboard that allows
#' for interactive analysis of events from demo app.
#' The demo app can be also run in background and events fired in the app
#' can be seen immediately in the demo dashboard.
#'
#' @param in_background A logical.
#' If \code{TRUE} the demo shiny app is run in the background on port 5555.
#' Default is \code{FALSE}.
#'
#' @describeIn run_demo Run demo shiny app
#'
#' @import shiny
#' @importFrom utils browseURL
#'
#' @export
#' @examples
#' \donttest{
#' if (interactive()) {
#' run_demo(in_background = TRUE)
#' run_demo_dashboard()
#' }
#' }
run_demo <- function(in_background = FALSE) {
  # Locate the demo Shiny app shipped inside the installed package.
  app_path <- system.file("shiny", "demoapp", package = "shinyEventLogger")
  if (in_background) {
    # Launch a separate R process so the current session stays usable.
    # The port is hard-coded to 5555 (matches the message and @param docs).
    # NOTE(review): the `invisible` argument of system() is documented as
    # Windows-only -- confirm it is harmless on other platforms.
    system(
      paste0("R -e \"shiny::runApp('", app_path,
             "', port = 5555, display.mode = 'normal') \""),
      wait = FALSE,
      invisible = TRUE
    )
    message("The App is available on 127.0.0.1:5555")
    # Open the user's default browser at the background app's address.
    utils::browseURL("http://127.0.0.1:5555")
  } else {
    # Foreground mode: blocks the current R session until the app is closed.
    shiny::runApp(app_path, display.mode = "normal")
  }
} # end of run_demo
#' @describeIn run_demo Run demo shiny dashboard
#' @export
run_demo_dashboard <- function() {
  # Locate and launch the companion dashboard app in the foreground
  # (blocks the current R session until the app is closed).
  app_path <- system.file("shiny", "dashboardapp", package = "shinyEventLogger")
  shiny::runApp(app_path, display.mode = "normal")
} # end of run_demo_dashboard
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object: a list of closures that cache the
## matrix together with its (lazily computed) inverse.
##
## Returned interface:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse (called by cacheSolve)
##   getinverse()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  invx <- NULL
  set <- function(y) {
    x <<- y
    ## BUG FIX: the original assigned `rev <<- NULL`, which left the stale
    ## cached inverse in `invx`. Resetting `invx` invalidates the cache
    ## whenever the underlying matrix changes.
    invx <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) invx <<- inverse
  getinverse <- function() invx
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" produced by makeCacheMatrix().
## The inverse is computed at most once: a cached value is returned on every
## subsequent call, and a freshly computed inverse is stored back via
## x$setinverse() for reuse. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
| /cachematrix.R | no_license | jogugil/ProgrammingAssignment2 | R | false | false | 764 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object: a list of closures that cache the
## matrix together with its (lazily computed) inverse.
##
## Returned interface:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse (called by cacheSolve)
##   getinverse()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  invx <- NULL
  set <- function(y) {
    x <<- y
    ## BUG FIX: the original assigned `rev <<- NULL`, which left the stale
    ## cached inverse in `invx`. Resetting `invx` invalidates the cache
    ## whenever the underlying matrix changes.
    invx <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) invx <<- inverse
  getinverse <- function() invx
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Compute (or fetch) the inverse of the special "matrix" created by
## makeCacheMatrix(). The first call computes solve() and caches the result
## inside `x`; later calls return the cached inverse without recomputing.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  invx <- x$getinverse()
  if(!is.null(invx)) {
    ## Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(invx)
  }
  ## Cache miss: compute, store for next time, and return.
  data <- x$get()
  invx <- solve(data, ...)
  x$setinverse(invx)
  invx
}
|
## CENTRAL PARAMS FILE FOR SAMOAN CRAB DEMOGRAPHY
## FIXED POINT ESTIMATES
## NOTE: this file is source()d for its side effects -- it defines the global
## parameter objects consumed by the downstream simulation scripts.
# GROWTH ------------------------------------------------------------------
## von Bertalanffy growth parameters (lengths in mm carapace width).
Linf = 200 # from S. serrata, estimated from Bonine et al. 2008
## The [1] selects the first of three candidate growth-rate values.
K = c(0.0045,0.57,1.38)[1] # from S. serrata,
# Moknes et al. 2015 (2nd value)
# S. serrata, Indonesia, La Sara (2010): 1.38
tZero = -0.0015 ## fixed
# First submission:
# Linf = 310 #Moksnes et al 2015
# K = 0.57 #Moksnes et al 2015
# tZero = -0.019 #Moksnes et al 2015
## longevity parameters
# longevDraw = maxage ## just for syntax
## Growth Increment for Logit(Pmolt); derived from vonB; Chen & Kennelly (1999)
bchen = exp(-K) - 1
achen = -1 * bchen * Linf
sdmolt = 10 ## assumes continuous variance in molt increment
# FECUNDITY ---------------------------------------------------------------
size.at.maturity = 100 # 91-100 mm (females, Prasad & Neelakantan 1989)
L50 = (91+100)/2 ## Midpoint of size at maturity from Prasad & Neelakantan (1989)
## slope of length-fecundity equation
## For Samoan crab, 15.55 x 100 eggs increase per mm of carapace width - Sarower et al. (2013), Bangladesh (1000 eggs per mm is Kona crab value beta = 2081.8; from Onizuka 1972)
beta = 15550 #1500
## fixed sex proportion of females - fixed by me (Megsie) because we don't know the sex ratio.
SR = 0.5
# MORTALITY ---------------------------------------------------------------
## hoenig (1983) "all species" values - for estimating mortality
hoenig.slope = -0.982
hoenig.int = 1.44
# HARVEST RATE POLICIES ---------------------------------------------------
harvest.breaks = 10 ## number of even breaks from 0 - 0.9 to simulate harvest mortality (can't do 100%, gives -Inf
# for rVal)
hmin = 0
hmax = 0.9 # max harvest rate
## h.vector spans hmin..hmax in steps of 1/harvest.breaks (0.0, 0.1, ..., 0.9).
h.vector = c(seq(hmin,hmax,1/harvest.breaks))
## Parameters for BISSECTION METHOD to estimate the stationary harvest, Hstat
# Define the starting h values (for function runBissect)
Uhigh = 0.9 # High harvest rate to result in an eigenDom < 1
Ulow = 0 # Low harvest rate to result in an eigenDom >1
# Define number of iterations in the Bissection method
bissectIters <- 200
# Define convergence level in the Bissection method
bissectConv <- 0.00001
# VECTOR OF SIZE AT CAPTURE CUTOFFS ---------------------------------------
tc.vector = c(0,76,L50,152) ## all ages, smallest observed catch (this study), L50, legal size for Samoan crab from HI state (6 in = 152.4 mm)
HeeiaSel <- c(0, 0, 0, 0, 0.02, 0.14, 0.58, 1, 1) # discretized version of logistic sel curve
# make INITS object to save later -----------------------------------------
inits = list( 'TIMESTAMP' = as.character(date()),
'Linf' = Linf,
'K' = K,
'achen [growth increment for logit p(molt)]' = achen,
'bchen [growth increment for logit p(molt)]' = bchen,
'sdmolt' = sdmolt,
'tZero' = tZero,
'L50' = L50,
'hoenig.slope' = hoenig.slope,
'hoenig.int' = hoenig.int,
'beta' = beta,
'SR [sex ratio]' = SR,
'harvest.breaks' = harvest.breaks,
'UHigh' = Uhigh,
'Ulow' = Ulow,
'bissectIters' = bissectIters,
'bissectConv' = bissectConv,
'tc.vector' = tc.vector) | /Maia code/paramsScenarios.R | no_license | mkapur/crab-collab | R | false | false | 3,306 | r | ## CENTRAL PARAMS FILE FOR SAMOAN CRAB DEMOGRAPHY
## FIXED POINT ESTIMATES
## NOTE: this file is source()d for its side effects -- it defines the global
## parameter objects consumed by the downstream simulation scripts.
# GROWTH ------------------------------------------------------------------
## von Bertalanffy growth parameters (lengths in mm carapace width).
Linf = 200 # from S. serrata, estimated from Bonine et al. 2008
## The [1] selects the first of three candidate growth-rate values.
K = c(0.0045,0.57,1.38)[1] # from S. serrata,
# Moknes et al. 2015 (2nd value)
# S. serrata, Indonesia, La Sara (2010): 1.38
tZero = -0.0015 ## fixed
# First submission:
# Linf = 310 #Moksnes et al 2015
# K = 0.57 #Moksnes et al 2015
# tZero = -0.019 #Moksnes et al 2015
## longevity parameters
# longevDraw = maxage ## just for syntax
## Growth Increment for Logit(Pmolt); derived from vonB; Chen & Kennelly (1999)
bchen = exp(-K) - 1
achen = -1 * bchen * Linf
sdmolt = 10 ## assumes continuous variance in molt increment
# FECUNDITY ---------------------------------------------------------------
size.at.maturity = 100 # 91-100 mm (females, Prasad & Neelakantan 1989)
L50 = (91+100)/2 ## Midpoint of size at maturity from Prasad & Neelakantan (1989)
## slope of length-fecundity equation
## For Samoan crab, 15.55 x 100 eggs increase per mm of carapace width - Sarower et al. (2013), Bangladesh (1000 eggs per mm is Kona crab value beta = 2081.8; from Onizuka 1972)
beta = 15550 #1500
## fixed sex proportion of females - fixed by me (Megsie) because we don't know the sex ratio.
SR = 0.5
# MORTALITY ---------------------------------------------------------------
## hoenig (1983) "all species" values - for estimating mortality
hoenig.slope = -0.982
hoenig.int = 1.44
# HARVEST RATE POLICIES ---------------------------------------------------
harvest.breaks = 10 ## number of even breaks from 0 - 0.9 to simulate harvest mortality (can't do 100%, gives -Inf
# for rVal)
hmin = 0
hmax = 0.9 # max harvest rate
## h.vector spans hmin..hmax in steps of 1/harvest.breaks (0.0, 0.1, ..., 0.9).
h.vector = c(seq(hmin,hmax,1/harvest.breaks))
## Parameters for BISSECTION METHOD to estimate the stationary harvest, Hstat
# Define the starting h values (for function runBissect)
Uhigh = 0.9 # High harvest rate to result in an eigenDom < 1
Ulow = 0 # Low harvest rate to result in an eigenDom >1
# Define number of iterations in the Bissection method
bissectIters <- 200
# Define convergence level in the Bissection method
bissectConv <- 0.00001
# VECTOR OF SIZE AT CAPTURE CUTOFFS ---------------------------------------
tc.vector = c(0,76,L50,152) ## all ages, smallest observed catch (this study), L50, legal size for Samoan crab from HI state (6 in = 152.4 mm)
HeeiaSel <- c(0, 0, 0, 0, 0.02, 0.14, 0.58, 1, 1) # discretized version of logistic sel curve
# make INITS object to save later -----------------------------------------
inits = list( 'TIMESTAMP' = as.character(date()),
'Linf' = Linf,
'K' = K,
'achen [growth increment for logit p(molt)]' = achen,
'bchen [growth increment for logit p(molt)]' = bchen,
'sdmolt' = sdmolt,
'tZero' = tZero,
'L50' = L50,
'hoenig.slope' = hoenig.slope,
'hoenig.int' = hoenig.int,
'beta' = beta,
'SR [sex ratio]' = SR,
'harvest.breaks' = harvest.breaks,
'UHigh' = Uhigh,
'Ulow' = Ulow,
'bissectIters' = bissectIters,
'bissectConv' = bissectConv,
'tc.vector' = tc.vector) |
# Ariketa bakoitzeko galdera bati dagokion informazioa aurkezteko modulua
## Interfazeari lotutako kodea
# Module UI: a header with the question text, the printed likert() summary,
# and a likert bar plot for one exercise question.
# (Original comments were in Basque; translated to English.)
arikGaldAzterketaModuleUI <- function(id, testua) {
  ns <- NS(id)  # namespace the output ids so multiple module instances coexist
  tagList(
    h4(testua),                        # question text shown as a header
    verbatimTextOutput(ns("testua")),  # printed likert() summary
    plotOutput(ns("plot"))             # likert bar plot
    # Add the others (translated from "Besteak gehitu")
  )
}
## Zerbitzariari lotutako kodea
# Module server: builds a likert object from the reactive `data` source and
# renders both its printed summary and a percent-labelled bar plot.
# (Original comments were in Basque; translated to English. The answer-level
# labels and plot legend/axis strings are user-facing Basque text and are
# deliberately left untouched.)
arikGaldAzterketaModule<- function(input, output, session, data) {
  # Reactive likert object: recode the numeric answers (1-4) into labelled
  # factor levels, then wrap the single "Balioa" column in likert().
  likert.data <- reactive({
    datuak <- data()
    datuak$Balioa <- factor(datuak$Balioa, levels = 1:4,
                            labels = c("Guztiz kontra",
                                       "Ez oso ados",
                                       "Nahiko ados",
                                       "Guztiz ados"))
    likert(items=datuak[,"Balioa",drop=F])
  })
  output$testua <- renderPrint(likert.data())
  output$plot <- renderPlot({
    plot <- likert.bar.plot(likert.data(),plot.percents=T, legend="Erantzuna") + ylab("Portzentaia")
    # Apparently a workaround for likert.bar.plot layers whose data carries
    # NA in the Item column; such layers are dropped from the ggplot object.
    LHS <- 2
    RHS <- 3
    # NOTE(review): after removing layer LHS the remaining layers shift down,
    # so index RHS (3) then refers to a different layer than before the
    # removal -- confirm this ordering is intentional.
    if (sum(is.na(plot$layers[[LHS]]$data$Item))>0) plot$layers <- plot$layers[-LHS]
    if (sum(is.na(plot$layers[[RHS]]$data$Item))>0) plot$layers <- plot$layers[-RHS]
    plot
  })
} | /arikGaldAzterketaModule.R | no_license | jiplaolm/ZAAzterketa | R | false | false | 1,164 | r | # Ariketa bakoitzeko galdera bati dagokion informazioa aurkezteko modulua
## Interfazeari lotutako kodea
# Module UI: a header with the question text, the printed likert() summary,
# and a likert bar plot for one exercise question.
# (Original comments were in Basque; translated to English.)
arikGaldAzterketaModuleUI <- function(id, testua) {
  ns <- NS(id)  # namespace the output ids so multiple module instances coexist
  tagList(
    h4(testua),                        # question text shown as a header
    verbatimTextOutput(ns("testua")),  # printed likert() summary
    plotOutput(ns("plot"))             # likert bar plot
    # Add the others (translated from "Besteak gehitu")
  )
}
## Zerbitzariari lotutako kodea
# Module server: builds a likert object from the reactive `data` source and
# renders both its printed summary and a percent-labelled bar plot.
# (Original comments were in Basque; translated to English. The answer-level
# labels and plot legend/axis strings are user-facing Basque text and are
# deliberately left untouched.)
arikGaldAzterketaModule<- function(input, output, session, data) {
  # Reactive likert object: recode the numeric answers (1-4) into labelled
  # factor levels, then wrap the single "Balioa" column in likert().
  likert.data <- reactive({
    datuak <- data()
    datuak$Balioa <- factor(datuak$Balioa, levels = 1:4,
                            labels = c("Guztiz kontra",
                                       "Ez oso ados",
                                       "Nahiko ados",
                                       "Guztiz ados"))
    likert(items=datuak[,"Balioa",drop=F])
  })
  output$testua <- renderPrint(likert.data())
  output$plot <- renderPlot({
    plot <- likert.bar.plot(likert.data(),plot.percents=T, legend="Erantzuna") + ylab("Portzentaia")
    # Apparently a workaround for likert.bar.plot layers whose data carries
    # NA in the Item column; such layers are dropped from the ggplot object.
    LHS <- 2
    RHS <- 3
    # NOTE(review): after removing layer LHS the remaining layers shift down,
    # so index RHS (3) then refers to a different layer than before the
    # removal -- confirm this ordering is intentional.
    if (sum(is.na(plot$layers[[LHS]]$data$Item))>0) plot$layers <- plot$layers[-LHS]
    if (sum(is.na(plot$layers[[RHS]]$data$Item))>0) plot$layers <- plot$layers[-RHS]
    plot
  })
}
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.32673428104682e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615767502-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,804 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.32673428104682e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
### Read in which scaffolds contain HC, MC and LC gene models
### (each file is a one-column table of scaffold names in column V1).
HC <- read.table("HC_gene-model_scaffolds.txt", header = FALSE)
MC <- read.table("MC_gene-model_scaffolds.txt", header = FALSE)
LC <- read.table("LC_gene-model_scaffolds.txt", header = FALSE)
### Read in the consensus haploid map
map <- read.table("Consensus-maps.txt", header = TRUE)
## Extract the scaffold name (the marker-name text before the first ":")
## into an extra column. Vectorized replacement for the original
## row-by-row strsplit() loop; result is identical.
map$scaf <- as.factor(sub(":.*$", "", as.character(map$marker)))
### Which map scaffolds contain the different confidence-level gene models.
## BUG FIX: the original indexed map$scaf (one entry per marker) with
## `levels(map$scaf) %in% levels(...)`, a logical vector of length
## nlevels(map$scaf) that was silently recycled, selecting essentially
## arbitrary rows. Test membership per marker instead; %in% also works
## whether the columns were read as factors or characters.
HC_scaf_in_map <- map$scaf[map$scaf %in% HC$V1]
MC_scaf_in_map <- map$scaf[map$scaf %in% MC$V1]
LC_scaf_in_map <- map$scaf[map$scaf %in% LC$V1]
library(gdata) # kept for compatibility; droplevels() itself is base R
HC_scaf_in_map <- droplevels(HC_scaf_in_map)
MC_scaf_in_map <- droplevels(MC_scaf_in_map)
LC_scaf_in_map <- droplevels(LC_scaf_in_map)
### Read in the inter and intra split scaffold information
intra_split <- read.table("intra_split_scaffolds.txt", header = FALSE)
inter_split <- read.table("inter_split_scaffolds.txt", header = FALSE)
### Which confidence level of gene models are located on the split scaffolds?
## Precompute the membership masks once; sum(mask) equals the original
## length(x[mask]) because %in% never yields NA. Each top-level sum()
## auto-prints its count, matching the original console output.
inter_HC <- inter_split$V1 %in% HC$V1
inter_MC <- inter_split$V1 %in% MC$V1
inter_LC <- inter_split$V1 %in% LC$V1
# Inter split: counts for every HC/MC/LC presence combination
sum(inter_HC & inter_MC & inter_LC)
sum(inter_HC & inter_MC & !inter_LC)
sum(inter_HC & !inter_MC & inter_LC)
sum(!inter_HC & inter_MC & inter_LC)
sum(inter_HC & !inter_MC & !inter_LC)
sum(!inter_HC & inter_MC & !inter_LC)
sum(!inter_HC & !inter_MC & inter_LC)
# Intra split: same breakdown for intra-scaffold splits
intra_HC <- intra_split$V1 %in% HC$V1
intra_MC <- intra_split$V1 %in% MC$V1
intra_LC <- intra_split$V1 %in% LC$V1
sum(intra_HC & intra_MC & intra_LC)
sum(intra_HC & intra_MC & !intra_LC)
sum(intra_HC & !intra_MC & intra_LC)
sum(!intra_HC & intra_MC & intra_LC)
sum(intra_HC & !intra_MC & !intra_LC)
sum(!intra_HC & intra_MC & !intra_LC)
sum(!intra_HC & !intra_MC & intra_LC)
| /map_analysis/gene_model_confidance_in_map.R | permissive | parkingvarsson/HaploidSpruceMap | R | false | false | 2,799 | r | ### Read in which scaffolds that contain HC, MC and LC gene models
## Load the per-confidence-class scaffold lists (one column, V1, of
## scaffold names).
read.table("HC_gene-model_scaffolds.txt",head=F)->HC
read.table("MC_gene-model_scaffolds.txt",head=F)->MC
read.table("LC_gene-model_scaffolds.txt",head=F)->LC
### Read in the consensus haploid map
read.table("Consensus-maps.txt",head=T)->map
## Extract the scaffold name from the marker names and put them in an extra column
## (scaffold = marker-name text before the first ":").
map$scaf<-NULL
for (i in 1:dim(map)[1]){
map$scaf[i]<-strsplit(as.character(map$marker[i]),split=":")[[1]][1]
}
map$scaf<-as.factor(map$scaf)
### Which map scaffolds and how many scaffolds contain the different confidence level gene models
## NOTE(review): `levels(map$scaf) %in% levels(HC$V1)` has length
## nlevels(map$scaf), but it indexes map$scaf, which has one entry per
## marker; the logical index is silently recycled, so these subsets are
## almost certainly not the intended "map scaffolds containing HC genes".
## Consider `map$scaf[map$scaf %in% HC$V1]` instead.
HC_scaf_in_map<-map$scaf[levels(map$scaf) %in% levels(HC$V1)]
MC_scaf_in_map<-map$scaf[levels(map$scaf) %in% levels(MC$V1)]
LC_scaf_in_map<-map$scaf[levels(map$scaf) %in% levels(LC$V1)]
library(gdata) # NOTE(review): droplevels() is base R; gdata may be unnecessary here
HC_scaf_in_map<-droplevels(HC_scaf_in_map)
MC_scaf_in_map<-droplevels(MC_scaf_in_map)
LC_scaf_in_map<-droplevels(LC_scaf_in_map)
### Read in the inter and intra split scaffold information
read.table("intra_split_scaffolds.txt",head=F)->intra_split
read.table("inter_split_scaffolds.txt",head=F)->inter_split
### Which confidence level of gene models are located on the split scaffolds?
## Each length(...) below counts the split scaffolds falling in one
## HC/MC/LC membership combination and auto-prints the count.
#Inter split
length(inter_split$V1[inter_split$V1 %in% HC$V1 & inter_split$V1 %in% MC$V1 & inter_split$V1 %in% LC$V1])
length(inter_split$V1[inter_split$V1 %in% HC$V1 & inter_split$V1 %in% MC$V1 & !(inter_split$V1 %in% LC$V1)])
length(inter_split$V1[inter_split$V1 %in% HC$V1 & !(inter_split$V1 %in% MC$V1) & inter_split$V1 %in% LC$V1])
length(inter_split$V1[!(inter_split$V1 %in% HC$V1) & inter_split$V1 %in% MC$V1 & inter_split$V1 %in% LC$V1])
length(inter_split$V1[inter_split$V1 %in% HC$V1 & !(inter_split$V1 %in% MC$V1) & !(inter_split$V1 %in% LC$V1)])
length(inter_split$V1[!(inter_split$V1 %in% HC$V1) & inter_split$V1 %in% MC$V1 & !(inter_split$V1 %in% LC$V1)])
length(inter_split$V1[!(inter_split$V1 %in% HC$V1) & !(inter_split$V1 %in% MC$V1) & inter_split$V1 %in% LC$V1])
#Intra split
length(intra_split$V1[intra_split$V1 %in% HC$V1 & intra_split$V1 %in% MC$V1 & intra_split$V1 %in% LC$V1])
length(intra_split$V1[intra_split$V1 %in% HC$V1 & intra_split$V1 %in% MC$V1 & !(intra_split$V1 %in% LC$V1)])
length(intra_split$V1[intra_split$V1 %in% HC$V1 & !(intra_split$V1 %in% MC$V1) & intra_split$V1 %in% LC$V1])
length(intra_split$V1[!(intra_split$V1 %in% HC$V1) & intra_split$V1 %in% MC$V1 & intra_split$V1 %in% LC$V1])
length(intra_split$V1[intra_split$V1 %in% HC$V1 & !(intra_split$V1 %in% MC$V1) & !(intra_split$V1 %in% LC$V1)])
length(intra_split$V1[!(intra_split$V1 %in% HC$V1) & intra_split$V1 %in% MC$V1 & !(intra_split$V1 %in% LC$V1)])
length(intra_split$V1[!(intra_split$V1 %in% HC$V1) & !(intra_split$V1 %in% MC$V1) & intra_split$V1 %in% LC$V1])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_geoms.R
\name{stat_missing}
\alias{stat_missing}
\title{Missing Data Markers}
\usage{
stat_missing(
mapping = NULL,
data = NULL,
geom = "point",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
nudge_y = 0,
...,
shape = 24,
size = 4,
fill = "tan2",
color = "gray10"
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{geom}{The geometric object to use to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
\item{nudge_y}{Vertical adjustment to nudge markers by.}
\item{..., shape, size, fill, color}{Other arguments passed on to
\code{\link[ggplot2:layer]{ggplot2::layer()}}.}
}
\value{
a ggplot2 stat.
}
\description{
Add missing data markers to a plot. This function is used
to represent years with missing data in column plots.
}
| /man/stat_missing.Rd | permissive | InteragencyEcologicalProgram/smonitr | R | false | true | 2,623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_geoms.R
\name{stat_missing}
\alias{stat_missing}
\title{Missing Data Markers}
\usage{
stat_missing(
mapping = NULL,
data = NULL,
geom = "point",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
nudge_y = 0,
...,
shape = 24,
size = 4,
fill = "tan2",
color = "gray10"
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{geom}{The geometric object to use to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
\item{nudge_y}{Vertical adjustment to nudge markers by.}
\item{..., shape, size, fill, color}{Other arguments passed on to
\code{\link[ggplot2:layer]{ggplot2::layer()}}.}
}
\value{
a ggplot2 stat.
}
\description{
Add missing data markers to a plot. This function is used
to represent years with missing data in column plots.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_enable_all_features}
\alias{organizations_enable_all_features}
\title{Enables all features in an organization}
\usage{
organizations_enable_all_features()
}
\description{
Enables all features in an organization. This enables the use of
organization policies that can restrict the services and actions that
can be called in each account. Until you enable all features, you have
access only to consolidated billing, and you can't use any of the
advanced account administration features that AWS Organizations
supports. For more information, see \href{https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html}{Enabling All Features in Your Organization}
in the \emph{AWS Organizations User Guide.}
This operation is required only for organizations that were created
explicitly with only the consolidated billing features enabled. Calling
this operation sends a handshake to every invited account in the
organization. The feature set change can be finalized and the additional
features enabled only after all administrators in the invited accounts
approve the change by accepting the handshake.
After you enable all features, you can separately enable or disable
individual policy types in a root using EnablePolicyType and
DisablePolicyType. To see the status of policy types in a root, use
ListRoots.
After all invited member accounts accept the handshake, you finalize the
feature set change by accepting the handshake that contains
\code{"Action": "ENABLE_ALL_FEATURES"}. This completes the change.
After you enable all features in your organization, the management
account in the organization can apply policies on all member accounts.
These policies can restrict what users and even administrators in those
accounts can do. The management account can apply policies that prevent
accounts from leaving the organization. Ensure that your account
administrators are aware of this.
This operation can be called only from the organization's management
account.
}
\section{Request syntax}{
\preformatted{svc$enable_all_features()
}
}
\examples{
\dontrun{
# This example shows the administrator asking all the invited accounts in
# the organization to approve enabling all features in the organization.
# AWS Organizations sends an email to the address that is registered with
# every invited member account asking the owner to approve the change by
# accepting the handshake that is sent. After all invited member accounts
# accept the handshake, the organization administrator can finalize the
# change to enable all features, and those with appropriate permissions
# can create policies and apply them to roots, OUs, and accounts.
svc$enable_all_features()
}
}
\keyword{internal}
| /cran/paws.management/man/organizations_enable_all_features.Rd | permissive | sanchezvivi/paws | R | false | true | 2,858 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_enable_all_features}
\alias{organizations_enable_all_features}
\title{Enables all features in an organization}
\usage{
organizations_enable_all_features()
}
\description{
Enables all features in an organization. This enables the use of
organization policies that can restrict the services and actions that
can be called in each account. Until you enable all features, you have
access only to consolidated billing, and you can't use any of the
advanced account administration features that AWS Organizations
supports. For more information, see \href{https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html}{Enabling All Features in Your Organization}
in the \emph{AWS Organizations User Guide.}
This operation is required only for organizations that were created
explicitly with only the consolidated billing features enabled. Calling
this operation sends a handshake to every invited account in the
organization. The feature set change can be finalized and the additional
features enabled only after all administrators in the invited accounts
approve the change by accepting the handshake.
After you enable all features, you can separately enable or disable
individual policy types in a root using EnablePolicyType and
DisablePolicyType. To see the status of policy types in a root, use
ListRoots.
After all invited member accounts accept the handshake, you finalize the
feature set change by accepting the handshake that contains
\code{"Action": "ENABLE_ALL_FEATURES"}. This completes the change.
After you enable all features in your organization, the management
account in the organization can apply policies on all member accounts.
These policies can restrict what users and even administrators in those
accounts can do. The management account can apply policies that prevent
accounts from leaving the organization. Ensure that your account
administrators are aware of this.
This operation can be called only from the organization's management
account.
}
\section{Request syntax}{
\preformatted{svc$enable_all_features()
}
}
\examples{
\dontrun{
# This example shows the administrator asking all the invited accounts in
# the organization to approve enabling all features in the organization.
# AWS Organizations sends an email to the address that is registered with
# every invited member account asking the owner to approve the change by
# accepting the handshake that is sent. After all invited member accounts
# accept the handshake, the organization administrator can finalize the
# change to enable all features, and those with appropriate permissions
# can create policies and apply them to roots, OUs, and accounts.
svc$enable_all_features()
}
}
\keyword{internal}
|
## Compute PheRS (phenotype risk scores): map each individual's ICD-9 codes
## to phecodes, weight each phecode by its inverse log-prevalence, and sum
## the weights over the phecodes annotated to each disease (MIM).
library(tidyr)
## Read in map files
PheRS_map <- read.table("disease_to_phecodes.txt", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE)
icd9_to_phecodes <- read.table("icd9_to_phecode.txt", sep = "\t", header = TRUE,
                               stringsAsFactors = FALSE,
                               colClasses = c("character", "character"))
## Read in sample icd9 data for individuals (one row per ID/icd9 pair)
icd9s <- read.table("icd9s_sample_data.txt", sep = "\t", header = TRUE)
### Map individual icd9s to phecodes and build a wide 0/1 indicator table:
### one row per ID, one column per phecode.
phecodes <- merge(icd9s, icd9_to_phecodes, by = "icd9")
phecodes <- unique(phecodes[c("ID", "phecode")])
phecodes$value <- 1
# NOTE: spread() is superseded by pivot_wider() but is kept so the script
# still runs on older tidyr versions.
phecodes <- spread(phecodes, phecode, value, fill = 0)
## Calculate weights from icd9 data: w = log10(N / case_count), so rarer
## phecodes carry larger weights. `prev` is kept for reference/inspection.
pop_count <- nrow(phecodes)
weights <- data.frame(colSums(phecodes[2:length(phecodes)]))
weights$phecode <- rownames(weights)
names(weights)[1] <- "case_count"
weights$prev <- weights$case_count / pop_count
weights$w <- log10(pop_count / weights$case_count)
## If you want to use weights generated from the adult VUMC cohort used in the discovery cohort, uncomment the next 2 lines
#weights=read.table('weights_VUMC_discovery.txt', sep="\t", header=T,stringsAsFactors = F)
#rownames(weights)=weights$phecode
## Replace each 0/1 indicator with its weight (indicator * w).
phes <- names(phecodes)[-1]
for (phe in phes) {
  phecodes[[phe]] <- phecodes[[phe]] * weights$w[weights$phecode == phe]
}
## Create PheRS score: for each disease (MIM), sum the weights of its
## comma-separated annotated phecodes for every individual.
MIMs <- unique(PheRS_map$MIM)
PheRS <- data.frame(phecodes$ID)
names(PheRS)[1] <- "ID"
for (MIM in MIMs) {
  phecode_list <- as.character(PheRS_map[PheRS_map$MIM == MIM, ]$phecodes)
  phecode_list <- strsplit(phecode_list, ",")[[1]]
  # (removed a dead no-op statement `phecodes[phecode_list]` from the original)
  PheRS[MIM] <- rowSums(phecodes[phecode_list], na.rm = TRUE)
}
## Add FID and IID columns for Plink (FID duplicates the individual ID)
PheRS$FID <- PheRS$ID
names(PheRS)[1] <- "IID"
## Reorder columns
PheRS <- PheRS[c("FID", "IID", MIMs)]
## Write out a file formatted for Plink
write.table(PheRS, file = "PheRS_file_sample.txt", quote = FALSE, sep = "\t",
            row.names = FALSE, col.names = TRUE)
| /PheRS code.R | no_license | labastar/PheRS | R | false | false | 1,855 | r | library(tidyr)
## Read in map files
PheRS_map=read.table('disease_to_phecodes.txt', sep="\t",header=T,stringsAsFactors = F)
icd9_to_phecodes=read.table("icd9_to_phecode.txt", sep="\t",header=T,stringsAsFactors = F,colClasses = c('character','character'))
## Read in sample icd9 data for individuals
icd9s=read.table('icd9s_sample_data.txt',sep="\t",header=T)
### map individual icd9s to phecodes
phecodes=merge(icd9s,icd9_to_phecodes,by="icd9")
phecodes=unique(phecodes[c("ID", "phecode")])
phecodes$value = 1
phecodes <- spread(phecodes, phecode,value,fill=0)
## Calculate weights from icd9 data
pop_count=nrow(phecodes)
weights=data.frame(colSums(phecodes[2:length(phecodes)]))
weights$phecode= rownames(weights)
names(weights)[1]="case_count"
weights$prev = weights$case_count/pop_count
weights$w=log10(pop_count/weights$case_count)
## If you want to use weights generated from the adult VUMC cohort used in the discovery cohort, uncomment the next 2 lines
#weights=read.table('weights_VUMC_discovery.txt', sep="\t", header=T,stringsAsFactors = F)
#rownames(weights)=weights$phecode
## add weights to phecodes
phes=names(phecodes)[-1]
for(i in 1:length(phes)){
phe=phes[i]
phecodes[[phe]]=phecodes[[phe]]*weights[weights$phecode==phe,]$w
}
## Create PheRS score
MIMs=unique(PheRS_map$MIM)
PheRS=data.frame(phecodes$ID)
names(PheRS)[1]="ID"
for(i in 1:length(MIMs)){
MIM=MIMs[i]
phecode_list=as.character(PheRS_map[PheRS_map$MIM==MIM,]$phecodes)
phecode_list=strsplit(phecode_list,",")[[1]]
phecodes[phecode_list]
PheRS[MIM]=rowSums(phecodes[phecode_list],na.rm=T)
}
## Add FID and IID for Plink
PheRS$FID = PheRS$ID
names(PheRS)[1]="IID"
## Reorder columns
PheRS=PheRS[c("FID","IID",MIMs)]
## Write out a file formatted for Plink
write.table(PheRS,file="PheRS_file_sample.txt",quote=F,sep="\t",row.names=F,col.names=T)
|
#' A Hello World Shiny app
#'
#' Launches a minimal Shiny application that plots the first `n` rows of the
#' built-in `cars` dataset, with `n` chosen interactively via a slider.
#'
#' @return A Shiny app object (as returned by [shiny::shinyApp()]); running
#'   the function in an interactive session starts the app.
#'
#' @import shiny
#' @importFrom graphics plot
#' @importFrom utils head
#' @importFrom roperators chr
#' @export
helloWorldApp <- function() {
  utils::data(cars)
  shinyApp(
    ui = fluidPage(
      # slider from 1 to the number of rows in `cars`, defaulting to 10
      sliderInput("n", "n", 1, nrow(cars), 10),
      plotOutput("plot")
    ),
    server = function(input, output) {
      output$plot <- renderPlot({
        # NOTE(review): `a` is assigned but never used; presumably kept to
        # exercise the roperators import declared above -- confirm before
        # removing it together with the @importFrom directive.
        a <- chr(9)
        # fixed axis limits so the plot does not rescale as n changes
        plot(head(cars, input$n), xlim = range(cars[[1]]), ylim = range(cars[[2]]))
      })
    }
  )
}
# Declare `cars` so R CMD check does not flag it as an undefined global.
globalVariables("cars")
| /R/helloworld.R | no_license | freedin/shinytestPackageExample | R | false | false | 571 | r | #' A Hello World Shiny app
#'
#' @import shiny
#' @importFrom graphics plot
#' @importFrom utils head
#' @importFrom roperators chr
#' @export
helloWorldApp <- function() {
utils::data(cars)
shinyApp(
ui = fluidPage(
sliderInput("n", "n", 1, nrow(cars), 10),
plotOutput("plot")
),
server = function(input, output) {
output$plot <- renderPlot({
a <- chr(9)
plot(head(cars, input$n), xlim = range(cars[[1]]), ylim = range(cars[[2]]))
})
}
)
}
# This is needed to make R CMD check happy
globalVariables("cars")
|
# Fit polynomial models
#
# The model formula is built around stats::polym, which expands the full
# polynomial basis of the given degree and names coefficients with power
# indicators, e.g. the term x1*x2^2 in a 3-variable model is named "1.2.0".
# coef_name_to_array_index() parses that pattern to position coefficients in
# the multi-dimensional arrays used by the GloptiPolyR solver.
#
# The factor columns are renamed x1..xk and the formula is assembled
# programmatically, so a single code path covers the supported 2 - 5 factors.
#
# @inheritParams GloptiPolyRegion
# @return an object of class "lm"
fit_polym <- function(X, y, degree) {
  k <- ncol(X)
  if (k < 2 || k > 5) {
    stop("The function only takes 2 - 5 factors.")
  }
  var_names <- paste0("x", seq_len(k))
  data <- data.frame(X, y)
  colnames(data) <- c(var_names, "y")
  # "degree = degree" is kept symbolic so the coefficient labels match the
  # previous hand-written formulas exactly; `degree` is resolved from this
  # function's frame (the default environment of as.formula) when lm()
  # evaluates the formula.
  model <- lm(
    as.formula(paste0("y ~ polym(", toString(var_names), ", degree = degree, raw = TRUE)")),
    data = data
  )
  # return
  model
}
# Get positions for monomial coefficients
#
# @param coefficients_name string vector of shape (1, p); it specifies the
#        coefficient names following the polym pattern, e.g.,
#        the name for x1*x2^2 in a 3-variable model is "1.2.0"
# @param k integer scalar; it specifies the number of variables
# @return numeric matrix of shape (p, k); its (i, j) element specifies the
#         (power + 1) value of the jth variable in the ith monomial term,
#         (power + 1) accommodating the zero power; its ith row specifies
#         the position of the coefficient of the ith monomial term in the
#         multi-dimensional array of the GloptiPolyR solver
#' @importFrom magrittr "%>%"
coef_name_to_array_index <- function(coefficients_name, k) {
  # "1.2.0"-style suffix of each coefficient name; NA for the intercept,
  # whose name carries no power pattern
  array_index_string <- stringr::str_extract(coefficients_name, "(\\d\\.)+[\\d]")
  array_index_number <- matrix(NA, length(array_index_string), k)
  # row 1 is the intercept (first coefficient of a polym-based lm fit):
  # power 0 in every variable, i.e. array position 1
  array_index_number[1, ] <- 1
  # seq_along(x)[-1] (rather than 2:length(x)) stays empty when only one
  # coefficient is present, avoiding an out-of-bounds write
  for (i in seq_along(array_index_string)[-1]) {
    array_index_number[i, ] <- array_index_string[i] %>%
      stringr::str_split("\\.") %>%
      unlist() %>%
      as.numeric() + 1
  }
  # return
  array_index_number
}
# Optimize fitted polynomial functions via GloptiPolyR
#
# Builds the list of polynomial coefficient arrays describing the objective
# and the box constraints lb <= x <= ub, then calls the GloptiPolyR solver.
# Coefficients are placed in the multi-dimensional arrays by direct matrix
# indexing (each row of the index matrix addresses one monomial) instead of
# the former eval(parse(text = ...)) string construction.
#
# @param coefficients numeric vector of shape (1, p); it specifies the
#        coefficients of an "lm" object formulated with the polym function
# @param k integer scalar; it specifies the number of variables
# @inheritParams GloptiPolyRegion
# @return the optimal solution and its corresponding objective value
Gloptipolym <- function(coefficients, k, degree, lb, ub, maximization) {
  Ps <- list() # argument for GloptiPolyR, a list of lists
  dims <- rep(degree + 1, k)

  # Objective function ------------------------------------------------------
  obj <- array(0, dim = dims)
  # each row of id gives the (power + 1) position of one monomial coefficient
  id <- coef_name_to_array_index(names(coefficients), k = k)
  obj[id] <- coefficients # matrix indexing: row i of id -> coefficients[i]
  # GloptiPolyR only minimizes, so negate the objective when maximizing
  Ps[[1]] <- list(c = if (maximization) -obj else obj, t = "min")

  # Constraint functions ----------------------------------------------------
  const_pos <- matrix(1, nrow = 1, ncol = k) # position of the constant term
  for (i in seq_len(k)) { # loop through variables
    # position of the linear term in variable i
    lin_pos <- const_pos
    lin_pos[1, i] <- 2

    # lower bound constraint: x_i - lb_i >= 0
    lower <- array(0, dim = dims)
    lower[lin_pos] <- 1
    lower[const_pos] <- -lb[i]
    Ps[[2 * i]] <- list(c = lower, t = ">=")

    # upper bound constraint: x_i - ub_i <= 0
    upper <- array(0, dim = dims)
    upper[lin_pos] <- 1
    upper[const_pos] <- -ub[i]
    Ps[[2 * i + 1]] <- list(c = upper, t = "<=")
  }

  # Call GloptiPolyR --------------------------------------------------------
  res <- GloptiPolyR(Ps)
  # undo the sign flip applied to the objective for maximization problems
  objective <- if (maximization) -res$objective else res$objective
  # return
  list(solution = res$solution, objective = objective)
}
| /R/helper-functions-integrate-GloptiPolyR.R | no_license | cran/OptimaRegion | R | false | false | 5,101 | r | # Fit polynomial models
#
# Formulas are entered via the polym function.
# Polym automatically formulates the full polynomial basis given its degree
# and names coefficients with 0/1 indicators, e.g. the name for term
# x1*x2^2 in a 3 variables model is "1.2.0".
# This naming pattern can be used to position the coefficients in the
# multi-dimentional arrays for the GloptiPolyR solver, with the help of
# string-manipulation using regular expression
#
# @inheritParams GloptiPolyRegion
# @return an object of class "lm"
fit_polym <- function(X, y, degree) {
data <- data.frame(X, y)
if (ncol(X) == 2) {
colnames(data) <- c("x1", "x2", "y")
model <- lm(y ~ polym(x1, x2, degree = degree, raw = TRUE), data = data)
} else if (ncol(X) == 3) {
colnames(data) <- c("x1", "x2", "x3", "y")
model <- lm(y ~ polym(x1, x2, x3, degree = degree, raw = TRUE), data = data)
} else if (ncol(X) == 4) {
colnames(data) <- c("x1", "x2", "x3", "x4", "y")
model <- lm(y ~ polym(x1, x2, x3, x4, degree = degree, raw = TRUE), data = data)
} else if (ncol(X) == 5) {
colnames(data) <- c("x1", "x2", "x3", "x4", "x5", "y")
model <- lm(y ~ polym(x1, x2, x3, x4, x5, degree = degree, raw = TRUE), data = data)
} else {
stop("The function only takes 2 - 5 factors.")
}
# return
model
}
# Get positions for monomial coefficients
#
# @param coefficients_name string vector of shape (1, p); it specifies the
# coefficient names following the polym pattern, e.g.,
# the name for x1*x2^2 in a 3-variable model is "1.2.0"
# @param k integer scalor; it specifies the number of variables
# @return integer matrix of shape (p, k); its (i, j) element speficies the
# (power + 1) value of the jth variable in the ith monomial term,
# (power + 1) accommodating the zero power; its ith row specifies
# the position of the coefficient of the ith nomomial term in the
# multi-dimensional array of the GloptiPolyR solver
#' @importFrom magrittr "%>%"
coef_name_to_array_index <- function(coefficients_name, k) {
array_index_string <- stringr::str_extract(coefficients_name, "(\\d\\.)+[\\d]")
array_index_number <- matrix(NA, length(array_index_string), k)
array_index_number[1, ] <- 1
for (i in 2:length(array_index_string)) {
array_index_number[i, ] <- array_index_string[i] %>%
stringr::str_split("\\.") %>%
unlist() %>%
as.numeric() + 1
}
# return
array_index_number
}
# Optimize fitted polynomial functions via GloptiPolyR
#
# @param coefficients numeric vector of shape (1, p); it specifies the the
# coefficients of an "lm" objected formulated with the
# polym function
# @param k integer scalor; it specifies the number of variables
# @inheritParams GloptiPolyRegion
# @return the optimal solution and its corresponding objective value
Gloptipolym <- function(coefficients, k, degree, lb, ub, maximization) {
Ps <- list() # argument for GloptiPolyR, a list of lists
# Objective function ------------------------------------------------------
P <- list()
c <- array(0, dim = rep(degree + 1, k))
# get position indices for the coefficients of the objective function
id <- coef_name_to_array_index(names(coefficients), k = k)
# put coefficient values into the multi-dimensional array
for (i in 1:nrow(id)) {
eval(parse(text = paste(
"c[", toString(id[i, ]),
"] <- coefficients[", i, "]"
)))
# example 1: eval(parse(text = "1+1")) -> 2
# example 2: toString(id[1,]) -> "1, 1, 1"
}
if (maximization) { # assume GloptiPolyR only supports "min"
P$c <- -c
} else {
P$c <- c
}
P$t <- "min" # specify attribute
Ps[[1]] <- P # add to list
# Constraint functions ----------------------------------------------------
for (i in 1:k) { # loop through variables
# Lower bound constraint
P <- list()
c <- array(0, dim = rep(degree + 1, k))
# specify coefficient 1 of the variable
index_for_c <- rep(1, k)
index_for_c[i] <- 2
eval(parse(text = paste("c[", toString(index_for_c), "] <- 1")))
# specify the constraint constant
eval(parse(text = paste("c[", toString(rep(1, k)), "] <- -lb[", i, "]")))
P$c <- c
P$t <- ">=" # specify attribute
Ps[[2 * i]] <- P # add to list
# Upper bound constraint
P <- list()
c <- array(0, dim = rep(degree + 1, k))
# specify coefficient 1 of the variable
index_for_c <- rep(1, k)
index_for_c[i] <- 2
eval(parse(text = paste("c[", toString(index_for_c), "] <- 1")))
# specify the constraint constant
eval(parse(text = paste("c[", toString(rep(1, k)), "] <- -ub[", i, "]")))
P$c <- c
P$t <- "<="
Ps[[2 * i + 1]] <- P
}
# Call GloptiPolyR --------------------------------------------------------
res <- GloptiPolyR(Ps)
solution <- res$solution
if (maximization) { # assume GloptiPolyR only supports "min"
objective <- -res$objective
} else {
objective <- res$objective
}
# return
list(solution = solution, objective = objective)
}
|
library(tidyverse)
deep_hollow_bedding <- read_tsv(
"data-raw/deep_hollow_bedding.txt",
col_types = cols(
.default = col_double(),
`Dip Quad` = col_character(),
Units = col_character()
)
) %>%
mutate(type = "bedding")
deep_hollow_cleavage <- read_tsv(
"data-raw/deep_hollow_cleavage.txt",
col_types = cols(
.default = col_double(),
`Dip Quad` = col_character(),
Units = col_character()
)
) %>%
mutate(type = "cleavage")
deep_hollow <- bind_rows(deep_hollow_bedding, deep_hollow_cleavage) %>%
select(type, strike = Strike, dip = Dip, dip_quad = `Dip Quad`)
usethis::use_data(deep_hollow, overwrite = TRUE)
| /data-raw/collect_stnt_data.R | no_license | paleolimbot/ggstereo | R | false | false | 653 | r |
library(tidyverse)
deep_hollow_bedding <- read_tsv(
"data-raw/deep_hollow_bedding.txt",
col_types = cols(
.default = col_double(),
`Dip Quad` = col_character(),
Units = col_character()
)
) %>%
mutate(type = "bedding")
deep_hollow_cleavage <- read_tsv(
"data-raw/deep_hollow_cleavage.txt",
col_types = cols(
.default = col_double(),
`Dip Quad` = col_character(),
Units = col_character()
)
) %>%
mutate(type = "cleavage")
deep_hollow <- bind_rows(deep_hollow_bedding, deep_hollow_cleavage) %>%
select(type, strike = Strike, dip = Dip, dip_quad = `Dip Quad`)
usethis::use_data(deep_hollow, overwrite = TRUE)
|
# Fuzzer-captured regression input: call the internal breakfast:::setBitNumber
# with the fixed integer argument recorded by the fuzzing harness.
testlist <- list(n = 757923840L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609963156-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 97 | r | testlist <- list(n = 757923840L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
library(HTDoseResponseCurve)
library(drc)
context("fit and summary methods")
test_that("fit works", {
# # line 1 has measurements at 24 and 48 hours while line 2 has only 48 hours
# ds1 = create_dataset(
# sample_types= c("line1","line1","line1","line1","line1","line1",
# "line2","line2","line2"),
# treatments = c("DMSO","drug1","drug2","DMSO","drug1","drug2",
# "DMSO","drug1","drug2"),
# concentrations = c(0, 100, 200, 0, 100, 200,
# 0, 100, 200),
# hours = c(24, 24, 24, 48, 48, 48,
# 48, 48, 48),
# values = c(100, 90, 20, 99, 80, 15,
# 100, 89, 87),
# plate_id = "plate_1",
# negative_control = "DMSO")
#
# fits = fit_statistics(ds1, drc::LL.4() )
#
# ds2 = create_dataset(
# sample_types= c("line1","line1","line1","line1",
# "line1","line1","line1","line1",
#
# "line2","line2","line2","line2",
# "line2","line2","line2","line2",
#
# "line3","line3","line3","line3",
# "line3","line3","line3","line3"),
# treatments = rep(c("DMSO","drug1","drug1","drug1"), 6),
# concentrations = c(0, 50, 100, 200,
# 0, 50, 100, 200,
#
# 0, 50, 100, 200,
# 0, 50, 100, 200,
#
# 0, 50, 100, 200,
# 0, 50, 100, 200),
# hours = rep(0, 24),
# values = c(100, 90, 85, 72,
# 99, 92, 83, 79,
#
# 97, 80, 40, 15,
# 95, 78, 38, 13,
#
# 96, 60, 35, 10,
# 94, 58, 33, 12),
# plate_id = "plate_1",
# negative_control = "DMSO")
#
# fit=fit_DRC( ds2,
# sample_types = c("line1", "line2", "line3"),
# treatments = c("drug1"),
# hour=0, fct=drc::LL.3() )
# fpc=HT_fit_plot_colors( fit )
# plot( fit,
# xlim=c(0, 1e5), ylim=c(0, 1.2),
# lwd=3,
# main="test",
# ylab="surviving fraction",
# xlab="nM")
# legend( 2, 0.5, legend = get.split.col(fpc$condition, "_|_", first = TRUE),
# fill=fpc$col, bty="n")
#
# fits = fit_statistics(ds2, drc::LL.4() )
#
} )
| /tests/testthat/testFit.R | no_license | DavidQuigley/HTDoseResponseCurve | R | false | false | 2,518 | r | library(HTDoseResponseCurve)
library(drc)
context("fit and summary methods")
test_that("fit works", {
# # line 1 has measurements at 24 and 48 hours while line 2 has only 48 hours
# ds1 = create_dataset(
# sample_types= c("line1","line1","line1","line1","line1","line1",
# "line2","line2","line2"),
# treatments = c("DMSO","drug1","drug2","DMSO","drug1","drug2",
# "DMSO","drug1","drug2"),
# concentrations = c(0, 100, 200, 0, 100, 200,
# 0, 100, 200),
# hours = c(24, 24, 24, 48, 48, 48,
# 48, 48, 48),
# values = c(100, 90, 20, 99, 80, 15,
# 100, 89, 87),
# plate_id = "plate_1",
# negative_control = "DMSO")
#
# fits = fit_statistics(ds1, drc::LL.4() )
#
# ds2 = create_dataset(
# sample_types= c("line1","line1","line1","line1",
# "line1","line1","line1","line1",
#
# "line2","line2","line2","line2",
# "line2","line2","line2","line2",
#
# "line3","line3","line3","line3",
# "line3","line3","line3","line3"),
# treatments = rep(c("DMSO","drug1","drug1","drug1"), 6),
# concentrations = c(0, 50, 100, 200,
# 0, 50, 100, 200,
#
# 0, 50, 100, 200,
# 0, 50, 100, 200,
#
# 0, 50, 100, 200,
# 0, 50, 100, 200),
# hours = rep(0, 24),
# values = c(100, 90, 85, 72,
# 99, 92, 83, 79,
#
# 97, 80, 40, 15,
# 95, 78, 38, 13,
#
# 96, 60, 35, 10,
# 94, 58, 33, 12),
# plate_id = "plate_1",
# negative_control = "DMSO")
#
# fit=fit_DRC( ds2,
# sample_types = c("line1", "line2", "line3"),
# treatments = c("drug1"),
# hour=0, fct=drc::LL.3() )
# fpc=HT_fit_plot_colors( fit )
# plot( fit,
# xlim=c(0, 1e5), ylim=c(0, 1.2),
# lwd=3,
# main="test",
# ylab="surviving fraction",
# xlab="nM")
# legend( 2, 0.5, legend = get.split.col(fpc$condition, "_|_", first = TRUE),
# fill=fpc$col, bty="n")
#
# fits = fit_statistics(ds2, drc::LL.4() )
#
} )
|
# Walkthrough of the core 'tm' (text mining) tutorial: corpus construction,
# inspection, metadata access, and document transformations.
# Build a corpus from the sample plain-text files shipped with the tm package
txt <- system.file("texts", "txt", package = "tm")
# readerControl marks the documents as Latin
(ovid <- VCorpus(DirSource(txt, encoding = "UTF-8"), readerControl = list(language = "lat")))
#(variable) is the same as print(variable)
# A corpus can also be built directly from an in-memory character vector
docs = c("This is tweet A", "This is tweet B");
VCorpus(VectorSource(docs));
# Reuters-21578 sample: XML documents read with a dedicated reader
#install.packages("XML")
reut21578 <- system.file("texts", "crude", package = "tm")
reuters <- VCorpus(DirSource(reut21578), readerControl = list(reader = readReut21578XMLasPlain))
# Write each document of the corpus to its own text file
writeCorpus(ovid);
# Concise summary of the corpus
print(ovid);
# Detailed view of the first two documents
inspect(ovid[1:2]);
# Full metadata of the second document
meta(ovid[[2]]);
# Access a single metadata field (here the document id)
meta(ovid[[2]], "id");
# Documents can be indexed by position or by id; both return the same object
identical(ovid[[2]], ovid[["ovid_2.txt"]])
# Raw text content of a single document
writeLines(as.character(ovid[[2]]))
# lapply applies a function to every document in the (list-like) corpus
lapply(ovid[1:2], as.character)
# TRANSFORMATIONS --------
# Collapse runs of whitespace
reuters <- tm_map(reuters, stripWhitespace)
# Lower-case all text (content_transformer lifts a plain string function)
reuters <- tm_map(reuters, content_transformer(tolower))
# Drop common English stop words
reuters <- tm_map(reuters, removeWords, stopwords("english"))
# Reduce words to their stems
reuters_stemmed = tm_map(reuters, stemDocument)
writeLines(as.character(reuters_stemmed[[2]]))
# Filter documents by metadata: ID 237 with a specific headline
idx <- meta(reuters, "id") == '237' & meta(reuters, "heading") == 'INDONESIA SEEN AT CROSSROADS OVER ECONOMIC CHANGE'
reuters[idx]
# Metadata can also be modified, e.g. the Dublin Core creator field
DublinCore(ovid[[1]], "Creator") = "Ano Nymous"
# Inspect the updated document metadata
meta(ovid[[1]])
# Metadata lives at document level by default; type = "corpus" targets
# corpus-level metadata instead
meta(ovid[[1]], tag = "test", type = "corpus") <- "test meta"
meta(ovid[[1]], type = "corpus")
# Document-level metadata values may be arbitrary vectors
meta(ovid[[1]], "foo") <- letters[1:20]
| /SemanticTextAnalysis/SemanticTextAnalysis_Project/TM_tutorial.R | no_license | Wikzo/Hagenberg_2015 | R | false | false | 1,754 | r | # trying out tm main tutorial
# get files in texts directory
txt <- system.file("texts", "txt", package = "tm")
(ovid <- VCorpus(DirSource(txt, encoding = "UTF-8"), readerControl = list(language = "lat")))
#(variable) is the same as print(variable)
# read directly from vector sources
docs = c("This is tweet A", "This is tweet B");
VCorpus(VectorSource(docs));
# reuters XML file
#install.packages("XML")
reut21578 <- system.file("texts", "crude", package = "tm")
reuters <- VCorpus(DirSource(reut21578), readerControl = list(reader = readReut21578XMLasPlain))
# export
writeCorpus(ovid);
# little details
print(ovid);
# more details
inspect(ovid[1:2]);
# meta
meta(ovid[[2]]);
# access id
meta(ovid[[2]], "id");
# test if identical
identical(ovid[[2]], ovid[["ovid_2.txt"]])
# get characters
writeLines(as.character(ovid[[2]]))
# lapply applies function X to all elements in the list
lapply(ovid[1:2], as.character)
# TRANSFORMATIONS --------
# remove whitespaces
reuters <- tm_map(reuters, stripWhitespace)
# convert to lower case
reuters <- tm_map(reuters, content_transformer(tolower))
# remove stop words
reuters <- tm_map(reuters, removeWords, stopwords("english"))
# stemming
reuters_stemmed = tm_map(reuters, stemDocument)
writeLines(as.character(reuters_stemmed[[2]]))
# filter documents with ID 237 and specific headline
idx <- meta(reuters, "id") == '237' & meta(reuters, "heading") == 'INDONESIA SEEN AT CROSSROADS OVER ECONOMIC CHANGE'
reuters[idx]
# access/modify meta data
DublinCore(ovid[[1]], "Creator") = "Ano Nymous"
# see meta data
meta(ovid[[1]])
# corpus vs index meta data
meta(ovid[[1]], tag = "test", type = "corpus") <- "test meta"
meta(ovid[[1]], type = "corpus")
meta(ovid[[1]], "foo") <- letters[1:20]
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{scaleProposal}
\alias{scaleProposal}
\title{Propose a multiplicatively uniform variable.}
\usage{
scaleProposal(A)
}
\arguments{
\item{A}{the scale of the proposal, 2 is a big jump, 1.1 is a small jump.}
}
\value{
a draw from a symmetrical (on the multiplicative scale) random variable
}
\description{
Taken from the proposal used for precisions in the C library GMRFLib by Havard Rue
and described in several papers and his book on GMRFs with Leonard Held.
}
\author{
Colin Millar \email{colin.millar@jrc.ec.europa.eu}
}
| /man/scaleProposal.Rd | no_license | AndyCampbell/msy | R | false | false | 582 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{scaleProposal}
\alias{scaleProposal}
\title{Propose a multiplicatively uniform variable.}
\usage{
scaleProposal(A)
}
\arguments{
\item{A}{the scale of the proposal, 2 is a big jump, 1.1 is a small jump.}
}
\value{
a draw from a symmetrical (on the multiplicative scale) random variable
}
\description{
Taken from the proposal used for precisions in the C library GMRFLib by Havard Rue
and described in several papers and his book on GMRFs with Leonard Held.
}
\author{
Colin Millar \email{colin.millar@jrc.ec.europa.eu}
}
|
#' Fits a Bayesian concentration-response model for target-time survival analysis
#'
#' This function estimates the parameters of an concentration-response
#' model for target-time survival analysis using Bayesian inference. In this model,
#' the survival rate of individuals at a given time point (called target time) is modeled
#' as a function of the chemical compound concentration. The actual number of
#' surviving individuals is then modeled as a stochastic function of the survival
#' rate. Details of the model are presented in the
#' vignette accompanying the package.
#'
#' The function returns
#' parameter estimates of the concentration-response model and estimates of the so-called
#' \eqn{LC_x}, that is the concentration of chemical compound required to get an \eqn{(1 - x/100)} survival rate.
#'
#' @param data an object of class \code{survData}
#' @param target.time the chosen endpoint to evaluate the effect of the chemical compound
#' concentration, by default the last time point available for
#' all concentrations
#' @param lcx desired values of \eqn{x} (in percent) for which to compute
#' \eqn{LC_x}.
#' @param n.chains number of MCMC chains, the minimum required number of chains
#' is 2
#' @param quiet if \code{TRUE}, does not print messages and progress bars from
#' JAGS
#' @param \dots Further arguments to be passed to generic methods
#'
#' @return The function returns an object of class \code{survFitTT}, which is a
#' list with the following information:
#' \item{estim.LCx}{a table of the estimated \eqn{LC_x} along with their 95\%
#' credible intervals}
#' \item{estim.par}{a table of the estimated parameters (medians) and 95\%
#' credible intervals}
#' \item{det.part}{the name of the deterministic part of the used model}
#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
#' distribution}
#' \item{warnings}{a table with warning messages}
#' \item{model}{a JAGS model object}
#' \item{parameters}{a list of parameter names used in the model}
#' \item{n.chains}{an integer value corresponding to the number of chains used
#' for the MCMC computation}
#' \item{n.iter}{a list of two indices indicating the beginning and the end of
#' monitored iterations}
#' \item{n.thin}{a numerical value corresponding to the thinning interval}
#' \item{jags.data}{a list of the data passed to the JAGS model}
#' \item{transformed.data}{the \code{survData} object passed to the function}
#' \item{dataTT}{the dataset with which the parameters are estimated}
#'
#' @keywords estimation
#
#' @examples
#'
#' # (1) Load the data
#' data(cadmium1)
#'
#' # (2) Create an object of class "survData"
#' dat <- survData(cadmium1)
#'
#' \dontrun{
#' # (3) Run the survFitTT function with the log-logistic
#' # binomial model
#' out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80),
#' quiet = TRUE)
#' }
#'
#' @import rjags
#' @importFrom dplyr filter
#'
#' @export
survFitTT.survDataCstExp <- function(data,
                                     target.time = NULL,
                                     lcx = c(5, 10, 20, 50),
                                     n.chains = 3,
                                     quiet = FALSE,
                                     ...) {
  # test class object
  if (!is(data, "survDataCstExp"))
    stop("survFitTT: object of class survDataCstExp expected")

  # select data at target.time and pool replicates
  dataTT <- selectDataTT(data, target.time)

  # gather replicates according to time and conc
  dataTT <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, dataTT, sum), replicate = 1)

  # choose the deterministic part: the 3-parameter model (with a survival
  # plateau d) is used as soon as any mortality is observed in the control
  control <- filter(dataTT, conc == 0)
  det.part <-
    if (any(control$Nsurv < control$Ninit)) "loglogisticbinom_3"
    else "loglogisticbinom_2"

  # JAGS model text and monitored parameters matching the chosen model
  if (det.part == "loglogisticbinom_3") {
    model.text <- llbinom3.model.text
    parameters <- c("log10b", "d", "log10e")
  } else {
    model.text <- llbinom2.model.text
    parameters <- c("log10b", "log10e")
  }

  # data and prior hyper-parameters passed to JAGS
  jags.data <- survCreateJagsData(det.part, dataTT)

  # define and compile the model
  model <- survLoadModel(model.program = model.text,
                         data = jags.data, n.chains,
                         Nadapt = 3000, quiet)

  # determine the number of iterations / thinning required
  sampling.parameters <- modelSamplingParameters(model,
                                                 parameters, n.chains, quiet)

  if (sampling.parameters$niter > 100000)
    stop("The model needs too many iterations to provide reliable parameter estimates !")

  # sampling (plain scalar `if` rather than ifelse on a length-1 condition)
  prog.b <- if (quiet) "none" else "text"
  mcmc <- coda.samples(model, parameters,
                       n.iter = sampling.parameters$niter,
                       thin = sampling.parameters$thin,
                       progress.bar = prog.b)

  # posterior medians and 95% credible intervals of the parameters
  estim.par <- survPARAMS(mcmc, det.part)

  # estimated LCx values and their 95% credible intervals
  estim.LCx <- estimXCX(mcmc, lcx, "LC")

  # check if the estimated LC50 lies in the tested concentration range.
  # NOTE(review): the control concentration 0 makes min(log10(data$conc))
  # equal to -Inf, so only the upper bound is effectively checked -- confirm
  # whether the control should be excluded from this range.
  warnings <- msgTableCreate()
  LC50 <- log10(estim.par["e", "median"])
  if (!(min(log10(data$conc)) < LC50 && LC50 < max(log10(data$conc)))) {
    ## store warning in warnings table
    msg <- "The LC50 estimation (model parameter e) lies outside the range of tested concentrations and may be unreliable as the prior distribution on this parameter is defined from this range !"
    warnings <- msgTableAdd(warnings, "LC50outRange", msg)
    ## print the message
    warning(msg, call. = FALSE)
  }

  # summarize the chains once instead of four separate summary() calls
  mcmc.summary <- summary(mcmc)

  # output
  OUT <- list(estim.LCx = estim.LCx,
              estim.par = estim.par,
              det.part = det.part,
              mcmc = mcmc,
              warnings = warnings,
              model = model,
              parameters = parameters,
              n.chains = mcmc.summary$nchain,
              n.iter = list(start = mcmc.summary$start,
                            end = mcmc.summary$end),
              n.thin = mcmc.summary$thin,
              jags.data = jags.data,
              transformed.data = data,
              dataTT = dataTT)
  class(OUT) <- "survFitTT"
  return(OUT)
}
survCreateJagsData <- function(det.part, data) {
  # Assemble the list of constants and observations handed to jags.model()
  # for the log-logistic binomial survival model, including the prior
  # hyper-parameters derived from the tested concentration range.
  #
  # det.part: deterministic part name, "loglogisticbinom_2" or
  #           "loglogisticbinom_3" (the latter adds bounds for parameter d)
  # data:     data frame with columns conc, Ninit and Nsurv
  #
  # Returns a named list matching the symbols used in the JAGS model text.

  # concentration range: smallest non-control and largest tested value
  nonzero <- sort(unique(data$conc))[-1]
  concmin <- min(nonzero)
  concmax <- max(data$conc)

  # normal prior on log10(e): centered on the log10 mid-range, with a
  # standard deviation spanning a quarter of the log10 range
  meanlog10e <- (log10(concmin) + log10(concmax)) / 2
  sdlog10e <- (log10(concmax) - log10(concmin)) / 4

  jags.data <- list(
    meanlog10e = meanlog10e,
    Ninit = data$Ninit,
    Nsurv = data$Nsurv,
    taulog10e = 1 / sdlog10e^2,
    log10bmin = -2,  # uniform prior bounds on log10(b)
    log10bmax = 2,
    n = length(data$conc),
    xconc = data$conc
  )

  # the 3-parameter variant additionally needs bounds for the plateau d
  if (det.part == "loglogisticbinom_3") {
    jags.data <- c(jags.data, dmin = 0, dmax = 1)
  }

  jags.data
}
survPARAMS <- function(mcmc, det.part) {
  # Posterior summary table for the survival concentration-response fit.
  #
  # mcmc:     posterior sample (one item per chain) accepted by summary()
  # det.part: deterministic part name; "loglogisticbinom_3" adds the d row
  #
  # Returns a data frame with columns median, Q2.5 and Q97.5 and one row per
  # natural-scale parameter (b and e, plus d for the 3-parameter model).

  qtab <- summary(mcmc)$quantiles

  # b and e are sampled on the log10 scale; back-transform their quantiles
  b_q <- 10^qtab["log10b", c("50%", "2.5%", "97.5%")]
  e_q <- 10^qtab["log10e", c("50%", "2.5%", "97.5%")]

  if (det.part == "loglogisticbinom_3") {
    # d (the survival plateau, present when the control shows mortality)
    # is sampled on the natural scale
    d_q <- qtab["d", c("50%", "2.5%", "97.5%")]
    estimates <- rbind(b = b_q, d = d_q, e = e_q)
  } else {
    estimates <- rbind(b = b_q, e = e_q)
  }

  data.frame(median = estimates[, 1],
             Q2.5 = estimates[, 2],
             Q97.5 = estimates[, 3],
             row.names = rownames(estimates))
}
llbinom3.model.text <- "\nmodel # Loglogistic binomial model with 3 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- d/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nd ~ dunif(dmin, dmax)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
llbinom2.model.text <- "\nmodel # Loglogistic binomial model with 2 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- 1/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
| /R/survFitTT.survDataCstExp.R | no_license | philipperuiz/morse | R | false | false | 9,886 | r | #' Fits a Bayesian concentration-response model for target-time survival analysis
#'
#' This function estimates the parameters of an concentration-response
#' model for target-time survival analysis using Bayesian inference. In this model,
#' the survival rate of individuals at a given time point (called target time) is modeled
#' as a function of the chemical compound concentration. The actual number of
#' surviving individuals is then modeled as a stochastic function of the survival
#' rate. Details of the model are presented in the
#' vignette accompanying the package.
#'
#' The function returns
#' parameter estimates of the concentration-response model and estimates of the so-called
#' \eqn{LC_x}, that is the concentration of chemical compound required to get an \eqn{(1 - x/100)} survival rate.
#'
#' @param data an object of class \code{survData}
#' @param target.time the chosen endpoint to evaluate the effect of the chemical compound
#' concentration, by default the last time point available for
#' all concentrations
#' @param lcx desired values of \eqn{x} (in percent) for which to compute
#' \eqn{LC_x}.
#' @param n.chains number of MCMC chains, the minimum required number of chains
#' is 2
#' @param quiet if \code{TRUE}, does not print messages and progress bars from
#' JAGS
#' @param \dots Further arguments to be passed to generic methods
#'
#' @return The function returns an object of class \code{survFitTT}, which is a
#' list with the following information:
#' \item{estim.LCx}{a table of the estimated \eqn{LC_x} along with their 95\%
#' credible intervals}
#' \item{estim.par}{a table of the estimated parameters (medians) and 95\%
#' credible intervals}
#' \item{det.part}{the name of the deterministic part of the used model}
#' \item{mcmc}{an object of class \code{mcmc.list} with the posterior
#' distribution}
#' \item{warnings}{a table with warning messages}
#' \item{model}{a JAGS model object}
#' \item{parameters}{a list of parameter names used in the model}
#' \item{n.chains}{an integer value corresponding to the number of chains used
#' for the MCMC computation}
#' \item{n.iter}{a list of two indices indicating the beginning and the end of
#' monitored iterations}
#' \item{n.thin}{a numerical value corresponding to the thinning interval}
#' \item{jags.data}{a list of the data passed to the JAGS model}
#' \item{transformed.data}{the \code{survData} object passed to the function}
#' \item{dataTT}{the dataset with which the parameters are estimated}
#'
#' @keywords estimation
#
#' @examples
#'
#' # (1) Load the data
#' data(cadmium1)
#'
#' # (2) Create an object of class "survData"
#' dat <- survData(cadmium1)
#'
#' \dontrun{
#' # (3) Run the survFitTT function with the log-logistic
#' # binomial model
#' out <- survFitTT(dat, lcx = c(5, 10, 15, 20, 30, 50, 80),
#' quiet = TRUE)
#' }
#'
#' @import rjags
#' @importFrom dplyr filter
#'
#' @export
survFitTT.survDataCstExp <- function(data,
                                     target.time = NULL,
                                     lcx = c(5, 10, 20, 50),
                                     n.chains = 3,
                                     quiet = FALSE,
                                     ...) {
  # Fit the Bayesian concentration-response survival model at the target time.
  # Workflow: select/pool data -> pick deterministic model from control
  # mortality -> build JAGS data/priors -> adapt, tune and sample -> summarise
  # parameters and LCx estimates.
  # test class object
  if(! is(data, "survDataCstExp"))
    stop("survFitTT: object of class survDataCstExp expected")
  # select Data at target.time and pool replicates
  dataTT <- selectDataTT(data, target.time)
  # Gather replicates according to time and conc (sum counts across replicates;
  # the pooled data behave as a single replicate)
  dataTT <- cbind(aggregate(cbind(Nsurv, Ninit) ~ time + conc, dataTT, sum), replicate = 1)
  # Choose model by testing mortality in the control: any death at conc == 0
  # requires the 3-parameter model with a free control survival rate d
  control <- filter(dataTT, conc == 0)
  det.part <-
    if (any(control$Nsurv < control$Ninit)) "loglogisticbinom_3"
    else "loglogisticbinom_2"
  # select model text (JAGS program defined at the bottom of this file)
  if (det.part == "loglogisticbinom_2") {
    model.text <- llbinom2.model.text
  }
  if (det.part == "loglogisticbinom_3") {
    model.text <- llbinom3.model.text
  }
  # parameters monitored during MCMC sampling
  parameters <- if (det.part == "loglogisticbinom_2") {
    c("log10b", "log10e")
  } else {
    if (det.part == "loglogisticbinom_3") {
      c("log10b", "d", "log10e")}
  }
  # create priors parameters derived from the tested concentration range
  jags.data <- survCreateJagsData(det.part, dataTT)
  # Define model (3000 adaptation iterations)
  model <- survLoadModel(model.program = model.text,
                         data = jags.data, n.chains,
                         Nadapt = 3000, quiet)
  # Determine sampling parameters (chain length and thinning from Raftery-type
  # diagnostics)
  sampling.parameters <- modelSamplingParameters(model,
                                                 parameters, n.chains, quiet)
  if (sampling.parameters$niter > 100000)
    stop("The model needs too many iterations to provide reliable parameter estimates !")
  # Sampling (progress bar suppressed when quiet)
  prog.b <- ifelse(quiet == TRUE, "none", "text")
  mcmc <- coda.samples(model, parameters,
                       n.iter = sampling.parameters$niter,
                       thin = sampling.parameters$thin,
                       progress.bar = prog.b)
  # summarize estim.par and CIs
  # calculate from the estimated parameters
  estim.par <- survPARAMS(mcmc, det.part)
  # LCx calculation: estimated LCx and their 95% CIs
  # for each requested x value
  estim.LCx <- estimXCX(mcmc, lcx, "LC")
  # check if estimated LC50 lies in the tested concentration range: the prior
  # on log10(e) is built from that range, so an outside estimate is suspect
  warnings <- msgTableCreate()
  LC50 <- log10(estim.par["e", "median"])
  if (!(min(log10(data$conc)) < LC50 & LC50 < max(log10(data$conc)))){
    ##store warning in warnings table
    msg <- "The LC50 estimation (model parameter e) lies outside the range of tested concentrations and may be unreliable as the prior distribution on this parameter is defined from this range !"
    warnings <- msgTableAdd(warnings, "LC50outRange", msg)
    ## print the message
    warning(msg, call. = FALSE)
  }
  # output: everything needed to reproduce / diagnose the fit
  OUT <- list(estim.LCx = estim.LCx,
              estim.par = estim.par,
              det.part = det.part,
              mcmc = mcmc,
              warnings = warnings,
              model = model,
              parameters = parameters,
              n.chains = summary(mcmc)$nchain,
              n.iter = list(start = summary(mcmc)$start,
                            end = summary(mcmc)$end),
              n.thin = summary(mcmc)$thin,
              jags.data = jags.data,
              transformed.data = data,
              dataTT = dataTT)
  class(OUT) <- "survFitTT"
  return(OUT)
}
survCreateJagsData <- function(det.part, data) {
  # Assemble the data list handed to jags.model(), including the prior
  # hyper-parameters derived from the tested concentration range.
  #
  # det.part: deterministic model name ("loglogisticbinom_2" or
  #           "loglogisticbinom_3")
  # data:     survData-like data frame with columns conc, Ninit, Nsurv
  # returns:  named list of constants and observations for JAGS
  conc.sorted <- sort(unique(data$conc))
  # drop the smallest (control) concentration before taking the minimum
  concmin <- min(conc.sorted[-1])
  concmax <- max(data$conc)
  # prior on log10(e): normal centred on the middle of the tested log10 range,
  # with sd equal to a quarter of that range
  lconcmin <- log10(concmin)
  lconcmax <- log10(concmax)
  sdlog10e <- (lconcmax - lconcmin) / 4
  jags.data <- list(meanlog10e = (lconcmin + lconcmax) / 2,
                    Ninit = data$Ninit,
                    Nsurv = data$Nsurv,
                    taulog10e = 1 / sdlog10e^2,
                    log10bmin = -2,   # prior on log10(b): uniform on [-2, 2]
                    log10bmax = 2,
                    n = length(data$conc),
                    xconc = data$conc)
  # the 3-parameter model additionally needs bounds for the control survival d
  if (det.part == "loglogisticbinom_3") {
    jags.data <- c(jags.data, dmin = 0, dmax = 1)
  }
  return(jags.data)
}
survPARAMS <- function(mcmc, det.part) {
  # Build the table of posterior medians and 95% credible bounds for the
  # survival model parameters.
  #
  # mcmc:     posterior samples (mcmc.list); b and e are sampled on the
  #           log10 scale and back-transformed here
  # det.part: deterministic part name; "loglogisticbinom_3" adds parameter d
  # returns:  data.frame with columns median, Q2.5, Q97.5 and one row per
  #           parameter (b, [d,] e)
  q <- summary(mcmc)$quantiles
  # extract (median, lower, upper) for one parameter, without names so the
  # resulting data.frame columns stay unnamed (as callers expect)
  qrow <- function(par) unname(q[par, c("50%", "2.5%", "97.5%")])
  b.q <- 10^qrow("log10b")
  e.q <- 10^qrow("log10e")
  if (det.part == "loglogisticbinom_3") {
    # mortality in the control: d (control survival rate) is on its own scale
    d.q <- qrow("d")
    rows <- c("b", "d", "e")
    meds <- c(b.q[1], d.q[1], e.q[1])
    lows <- c(b.q[2], d.q[2], e.q[2])
    higs <- c(b.q[3], d.q[3], e.q[3])
  } else {
    # no mortality in the control: only b and e
    rows <- c("b", "e")
    meds <- c(b.q[1], e.q[1])
    lows <- c(b.q[2], e.q[2])
    higs <- c(b.q[3], e.q[3])
  }
  data.frame(median = meds, Q2.5 = lows, Q97.5 = higs, row.names = rows)
}
# JAGS model definition: 3-parameter log-logistic binomial survival model
# (b = shape, d = control survival rate, e = LC50); used when the control
# shows mortality. The string is passed verbatim to jags.model().
llbinom3.model.text <- "\nmodel # Loglogistic binomial model with 3 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- d/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nd ~ dunif(dmin, dmax)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
# JAGS model definition: 2-parameter variant (b, e) with full survival in the
# control (p = 1 at zero concentration); used when the control shows no mortality.
llbinom2.model.text <- "\nmodel # Loglogistic binomial model with 2 parameters\n\t\t{\t\nfor (i in 1:n)\n{\np[i] <- 1/ (1 + (xconc[i]/e)^b)\nNsurv[i]~ dbin(p[i], Ninit[i])\n}\n\n# specification of priors (may be changed if needed)\nlog10b ~ dunif(log10bmin, log10bmax)\nlog10e ~ dnorm(meanlog10e, taulog10e)\n\nb <- pow(10, log10b)\ne <- pow(10, log10e)\n}\n"
|
# Regression demo: two linear relationships sharing slope 0.8 but with
# different intercepts (2 vs 15), each with unit-variance gaussian noise.
library(tidyverse)
x <- runif(100,5,25)
e1 <- rnorm(100,0,1)
e2 <- rnorm(100,0,1)
y1 <- 2+0.8*x+e1
y2 <- 15+0.8*x+e2
reg1 <- data.frame(x,y1,y2)
# scatter of y1 alone, on a fixed y range so the two panels are comparable
ggplot(reg1,aes(x,y1))+
  geom_point() +
  scale_y_continuous(limits = c(5,40))
# scatter of y2 alone, same fixed y range
ggplot(reg1,aes(x,y2))+
  geom_point() +
  scale_y_continuous(limits = c(5,40))
# both series overlaid with smoothed fits
ggplot(reg1)+
  geom_point(aes(x,y1), shape=4) +
  geom_point(aes(x,y2), shape=19) +
  geom_smooth(aes(x,y1), color="blue") +
  geom_smooth(aes(x,y2), color="red", linetype="dashed") +
  # NOTE(review): adding scale_y_continuous() twice makes ggplot2 replace the
  # first scale with the second, so the limits below are discarded; combine
  # both arguments into a single scale_y_continuous() call.
  scale_y_continuous(limits = c(5,40)) +
  scale_y_continuous(name="")
cor(x,y1)
cor(x,y2) | /exams_r/exam_regresion.R | no_license | jrlacalle/material_docente | R | false | false | 583 | r | library(tidyverse)
# Regression demo: two linear relationships sharing slope 0.8 but with
# different intercepts (2 vs 15), each with unit-variance gaussian noise.
x <- runif(100,5,25)
e1 <- rnorm(100,0,1)
e2 <- rnorm(100,0,1)
y1 <- 2+0.8*x+e1
y2 <- 15+0.8*x+e2
reg1 <- data.frame(x,y1,y2)
# scatter of y1 alone, on a fixed y range so the two panels are comparable
ggplot(reg1,aes(x,y1))+
  geom_point() +
  scale_y_continuous(limits = c(5,40))
# scatter of y2 alone, same fixed y range
ggplot(reg1,aes(x,y2))+
  geom_point() +
  scale_y_continuous(limits = c(5,40))
# both series overlaid with smoothed fits; a SINGLE y scale sets the limits
# and blanks the axis title (two scale_y_continuous() calls would replace one
# another and silently drop the limits)
ggplot(reg1)+
  geom_point(aes(x,y1), shape=4) +
  geom_point(aes(x,y2), shape=19) +
  geom_smooth(aes(x,y1), color="blue") +
  geom_smooth(aes(x,y2), color="red", linetype="dashed") +
  scale_y_continuous(name="", limits = c(5,40))
cor(x,y1)
cor(x,y2)
#' Multiple plot function
#'
#' ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
#' - cols: Number of columns in layout
#' - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#'
#' If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
#' then plot 1 will go in the upper left, 2 will go in the upper right, and
#' 3 will go all the way across the bottom.
#'
#' @export
multiplot <- function(..., plotlist=NULL, file=NULL, cols=1, layout=NULL, width=8.5, height=11) {
  # Arrange one or more ggplot objects on a single page (optionally a PDF).
  #
  # Args:
  #   ...           ggplot objects
  #   plotlist      alternatively (or additionally), a list of ggplot objects
  #   file          optional path; when given, multi-plot output goes to a PDF
  #   cols          number of layout columns when `layout` is not supplied
  #   layout        matrix of plot indices; overrides `cols`. E.g.
  #                 matrix(c(1,2,3,3), nrow=2, byrow=TRUE) puts plot 3 across
  #                 the whole bottom row.
  #   width, height PDF page size in inches
  require(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  if (numPlots == 0) {
    stop("multiplot: no plots supplied", call. = FALSE)
  }
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    if (!is.null(file)) pdf(file = file, width, height)
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in seq_len(numPlots)) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
    if (!is.null(file)) dev.off()
  }
}
#############################################
# calculate means of subsets of a dataframe #
#############################################
#' Summarise a column into per-type mean and sigma reference lines
#'
#' For each level of `df$type`, computes the mean of column `x_col` together
#' with the one- and two-sigma bands around it, then reshapes to a long table
#' suitable for drawing horizontal reference lines
#' (columns: type, n, linetype, yintercept).
#'
#' @export
calc_means <- function(df, x_col) {
  # copy the column of interest to a fixed name so the NSE verbs below can use it
  df$x <- df[[x_col]]
  df %>%
    group_by(type) %>%
    summarise(
      n = n(),
      mean = mean(x),
      # later expressions refer to the just-computed `mean` column (dplyr
      # evaluates summarise arguments sequentially)
      `mean + sigma` = mean + sd(x),
      `mean - sigma` = mean - sd(x),
      `mean + 2 sigma` = mean + 2 * sd(x),
      `mean - 2 sigma` = mean - 2 * sd(x)
    ) %>%
    # NOTE(review): gather() is superseded by tidyr::pivot_longer();
    # a candidate for modernisation when the tidyr dependency is bumped.
    gather(linetype, yintercept, -type, -n) %>%
    filter(!is.na(yintercept))
}
#########################################
# functions for clumped data processing #
#########################################
############
### Calculating external errors after initial york regressions
###########
#' Calc ext err
#'
#' Description
#'
#' @param df this will usually be a datafram with heated gas data
#' @param mass either 47 or 48
#' @param slope york slope from the initial regression
#' @param intercept york intercept from the initial regression
#' @param N number of heated gases
#' @export
calc_exterr <- function(df, mass, slope, intercept, N){
  # Propagate the scatter about the york line into "external" errors for the
  # small-delta and big-Delta columns of mass `mass`, appending two new
  # columns (d<mass>exterr, D<mass>exterr) to `df`.
  #
  # df:        data frame with columns d<mass>, D<mass>, d<mass>.sterr, D<mass>.sterr
  # mass:      isotope mass label, typically 47 or 48
  # slope, intercept: york regression coefficients
  # N:         number of heated gases used in the regression
  col <- function(prefix, suffix = "") paste0(prefix, mass, suffix)
  # per-point residual from the line, normalised by the big-Delta standard error
  resid.norm <- (df[[col("D")]] - (slope * df[[col("d")]] + intercept)) / df[[col("D", ".sterr")]]
  resid.sum <- abs(sum(resid.norm))
  df[[col("d", "exterr")]] <- sqrt(df[[col("d", ".sterr")]] * resid.sum / (N - 2))
  df[[col("D", "exterr")]] <- sqrt(df[[col("D", ".sterr")]] * resid.sum / (N - 2))
  return(df)
}
###############
## calculating confidence and predictive intervals for york regressions
###############
#' Confidence and prediction intervals for a york regression
#'
#' Computes Working-Hotelling simultaneous confidence and prediction bands
#' around a york regression line, evaluated at the small-delta values of
#' `all.df`, and returns `all.df` augmented with the band half-widths and the
#' predicted big-Delta values.
#'
#' @param reg.df dataframe that the york regression is based on - might be HG only or same as all data
#' @param all.df all the data of interest for constructing confidence intervals; might be same as reg.df
#' @param mass typically either 47 or 48
#' @param slope york slope
#' @param intercept york intercept
#' @param conf.level level of confidence interval, e.g. use 0.95 for 95\% confidence
#' @return `all.df` with added columns D<mass>.conf.int, D<mass>.pred.int and all.predicted.Y
#' @export
york.conf.pred.interval<-function(reg.df, all.df, mass, slope, intercept, conf.level) {
  line.predicted.Y<-slope * reg.df[[paste0("d",mass)]] + intercept # fitted big-Delta at the regression points only
  W <- sqrt( 2 * qf(conf.level, 2, length(line.predicted.Y)-2) ) #the Working-Hotelling multiplier (simultaneous bands)
  SE.est.pred <- sqrt(sum((reg.df[[paste0("D",mass)]]-line.predicted.Y)^2)/(length(line.predicted.Y)-2))*sqrt(1+1/length(line.predicted.Y)+(all.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2/sum((reg.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2)) # prediction-band standard error (includes the extra "+1" for a new observation)
  SE.est.conf <- sqrt(sum((reg.df[[paste0("D",mass)]] - line.predicted.Y)^2)/(length(line.predicted.Y) - 2)) * sqrt(1/length(line.predicted.Y)+(all.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2/sum((reg.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2)) # confidence-band standard error for the mean response
  conf.int.df<-all.df
  conf.int.df[[paste0("D",mass,".conf.int")]] <- W*SE.est.conf # half-width of the confidence band at each point
  conf.int.df[[paste0("D",mass,".pred.int")]] <- W*SE.est.pred # half-width of the prediction band at each point
  conf.int.df[["all.predicted.Y"]]<-slope * all.df[[paste0("d",mass)]] + intercept # fitted line at every point, needed for plotting
  return(conf.int.df)
}
#####################
## calculate temps and se from D47 values
######################
#' Temperature from a D47 value, original Ghosh calibration (CIT frame)
#'
#' Inverts the Ghosh-style calibration D47 = 59200 / T^2 - 0.02 (T in kelvin)
#' and reports the result in degrees Celsius, rounded to one decimal place.
#'
#' @param D47 clumped-isotope excess (permil, Caltech reference frame)
#' @export
convert_CIT.D47_to_CIT.Ghosh.temp <- function(D47) {
  temp.K <- (59200 / (D47 + 0.02))^0.5
  round(temp.K - 273.15, 1)
}
#' Temperature from a D47 value, Ghosh calibration in the absolute frame
#'
#' Inverts D47 = 63600 / T^2 - 0.0047 (T in kelvin) for D47 values expressed
#' in the absolute reference frame (ARF); returns degrees Celsius rounded to
#' one decimal place.
#'
#' @param D47 clumped-isotope excess (permil, absolute reference frame)
#' @export
convert_ARF.D47_to_ARF.Ghosh.temp <- function(D47) {
  temp.K <- (63600 / (D47 + 0.0047))^0.5
  round(temp.K - 273.15, 1)
}
#' Temperature from a D47 value, Dennis calibration in the absolute frame
#'
#' Inverts D47 = 36200 / T^2 + 0.292 (T in kelvin); only meaningful for
#' D47 > 0.292. Returns degrees Celsius rounded to one decimal place.
#'
#' @param D47 clumped-isotope excess (permil, absolute reference frame)
#' @export
convert_ARF.D47_to_ARF.Dennis.temp <- function(D47) {
  temp.K <- (36200 / (D47 - 0.292))^0.5
  round(temp.K - 273.15, 1)
}
#' Standard error of a D47-derived temperature
#'
#' Error propagation from the D47 standard error to the temperature estimate,
#' using fixed calibration-covariance coefficients. Result is rounded to one
#' decimal place.
#'
#' @param Temp temperature estimate in degrees C
#' @param D47 clumped-isotope excess (permil)
#' @param D47se standard error of D47
#' @export
calc_D47.Temp_se <- function(Temp, D47, D47se) {
  T.K <- Temp + 273.15
  # quadratic-in-D47 variance term from the calibration covariance matrix,
  # plus the measurement-error contribution scaled by D47se^2
  var.term <- 6.35164845416249E-13 +
    2 * D47 * - 1.03937857088367E-12 +
    D47^2 * 1.7284993474114E-12 +
    0.0000169461014527562^2 * D47se^2
  round(sqrt(abs(T.K^6 / 4 * var.term)), 1)
}
#' Fluid d18O from mineral d18O and temperature
#'
#' Converts the mineral value from VPDB to VSMOW (1.03091 * d18Om + 30.91),
#' then removes the temperature-dependent mineral-water fractionation.
#' Result in permil, rounded to one decimal place.
#'
#' @param d18Om mineral d18O (permil, VPDB)
#' @param Temp temperature in degrees C
#' @export
calc_d18Ow <- function(d18Om, Temp) {
  d18Om.smow <- 1.03091 * d18Om + 30.91
  # fractionation factor: 1000 ln(alpha) = 18030/T - 32.42 (T in kelvin)
  alpha <- exp(((18.03 * 10^3)/(Temp + 273.15) - 32.42) / 10^3)
  round((d18Om.smow + 1000) / alpha - 1000, 1)
}
#' Standard error of the fluid d18O estimate
#'
#' Quadrature combination of the temperature contribution (partial derivative
#' with respect to T, times Tempse) and the mineral-measurement contribution
#' (partial derivative with respect to d18Om, times d18Omse). Rounded to one
#' decimal place.
#'
#' @param d18Om mineral d18O (permil, VPDB)
#' @param d18Omse standard error of d18Om
#' @param Temp temperature in degrees C
#' @param Tempse standard error of Temp
#' @export
calc_d18Owse <- function (d18Om, d18Omse, Temp, Tempse) {
  T.K <- Temp + 273.15
  # temperature term: d(d18Ow)/dT * Tempse
  dT.term <- (18.03 * ((1.03091 * d18Om + 30.91) + 1000) * exp(0.03242)) /
    (exp(18.03 / T.K) * T.K^2) * Tempse
  # mineral term: d(d18Ow)/d(d18Om) * d18Omse
  dm.term <- exp(0.03242) / exp(18.03 / T.K) * d18Omse
  round(sqrt(dT.term^2 + dm.term^2), 1)
}
#####################################
## York Regression in general form ##
#####################################
#' York (errors-in-both-variables) linear regression
#'
#' Iterative York fit of Y on X when both variables carry measurement error.
#' Starts from an ordinary least-squares fit and refines the slope/intercept
#' for a fixed 1000 iterations. Returns every local variable as a named list
#' (via the `ls()` trick on the last line), including `york.slope`,
#' `york.intercept`, `york.slopevar`, `york.intvar`, `york.fit` and `Cab`.
#'
#' @export
york.regression<-function(X,x.err,Y,y.err,error.corr) #for clumps, X is d47, Y is D47; error.corr is the x/y error correlation (usually 0 here)
{ #everything assigned with "<-" in here is internal; all of it is exported by the final ls()-based return
  weightsX<-1/(x.err^2) #inputs are errors (stdev or sterr), not variances; weights are inverse variances
  weightsY<-1/(y.err^2)
  alpha<-sqrt(weightsX*weightsY)
  initial.slope<-(coef(lm(Y~X))[[2]]) #ordinary least-squares slope seeds the iteration
  initial.intercept<-(coef(lm(Y~X))[[1]]) #ordinary least-squares intercept seeds the iteration
  Wi<-(weightsX*weightsY/(weightsX+initial.slope^2*weightsY-2*initial.slope*error.corr*alpha))
  Ui<-X-(sum(Wi*X)/sum(Wi))
  Vi<-Y-(sum(Wi*Y)/sum(Wi))
  beta.i<-Wi*(Ui/weightsY+initial.slope*Vi/weightsX-(initial.slope*Ui+Vi)*error.corr/alpha)
  york.slope<-sum(Wi*beta.i*Vi)/sum(Wi*beta.i*Ui)
  york.intercept<-sum(Wi*Y)/sum(Wi)-york.slope*sum(Wi*X)/sum(Wi)
  york.slope.temp<-numeric(1000) #per-iteration slope history, kept so convergence can be inspected
  york.int.temp<-numeric(1000) #per-iteration intercept history, kept so convergence can be inspected
  for (i in 1:1000) { #fixed iteration count; the fit normally converges long before 1000 passes
    Wi<-(weightsX*weightsY/(weightsX+york.slope^2*weightsY-2*york.slope*error.corr*alpha))
    Ui<-X-(sum(Wi*X)/sum(Wi))
    Vi<-Y-(sum(Wi*Y)/sum(Wi))
    beta.i<-Wi*(Ui/weightsY+york.slope*Vi/weightsX-(york.slope*Ui+Vi)*error.corr/alpha)
    york.slope<-sum(Wi*beta.i*Vi)/sum(Wi*beta.i*Ui) #final slope after the last pass
    york.intercept<-sum(Wi*Y)/sum(Wi)-york.slope*sum(Wi*X)/sum(Wi) #final intercept after the last pass
    york.slope.temp[i]<-york.slope
    york.int.temp[i]<-york.intercept
  } #closes loop
  xi<-sum(Wi*X)/sum(Wi)+beta.i
  ui<-xi-sum(Wi*xi)/sum(Wi)
  york.slopevar<-1/sum(Wi*ui^2) #variance of the slope
  york.intvar<-1/sum(Wi)+(sum(Wi*xi)/sum(Wi))^2*york.slopevar #variance of the intercept
  N<-length(X)
  york.fit<-sum(Wi*(Y-york.slope*X-york.intercept)^2)/(N-2) #reduced goodness of fit (MSWD-like)
  S<-sum(1/(x.err^2))
  Sx<-sum(X/(x.err^2))
  Sxx<-sum(X^2/(x.err^2))
  Cab<- -Sx/(S*Sxx-(Sx^2)) #slope/intercept covariance term (least-squares form); NOTE(review): confirm this is the intended covariance for the york weights
  return(lapply(setNames(ls(), ls()), function(i) get(i, -1)))
} #closes the function; callers access results by name from the returned list
########################################################
# functions for adding regression equations to ggplots #
# use with the geom_text(x=, y= label=lm_eqn(...)) #
########################################################
#' Plot-ready label for a quadratic least-squares fit
#'
#' Fits y ~ x + x^2 and returns a plotmath string of the fitted equation and
#' its r-squared, for use with ggplot2 geom_text(..., parse = TRUE).
#'
#' @export
lm_poly2eqn <- function(x, y) {
  # raw = TRUE gives the same coefficients as Excel and as ggplot2's own fit
  fit <- lm(y ~ poly(x, 2, raw = TRUE))
  cf <- coef(fit)
  subs <- list(a  = format(cf[1], digits = 2),
               b  = format(cf[2], digits = 2),
               c  = format(cf[3], digits = 2),
               r2 = format(summary(fit)$r.squared, digits = 2))
  label <- substitute(italic(y) == c %.% italic(x)^2 + b %.% italic(x) + a * "," ~ italic(r)^2 == r2,
                      subs)
  as.character(as.expression(label))
}
# lm eqn. on plots
# for just a basic linear regression
# @export
# lm_eqn = function(x,y){
# m = lm(y ~ x)
# eq <- substitute(italic(y) == b %.% italic(x) + a * "," ~ italic(r)^2 == r2,
# list(a = format(coef(m)[1], digits = 2),
# b = format(coef(m)[2], digits = 2),
# r2 = format(summary(m)$r.squared, digits = 2)))
# as.character(as.expression(eq))
# }
#' Plot-ready label for a simple linear fit
#'
#' Fits y ~ x and returns a plotmath string of the fitted line and its
#' r-squared. The sign of the intercept is folded into the displayed
#' operator so the label never reads "+ -0.5".
#'
#' @export
lm_eqn <- function(x, y) {
  fit <- lm(y ~ x)
  intercept <- coef(fit)[1]
  subs <- list(a  = format(abs(intercept), digits = 2),
               b  = format(coef(fit)[2], digits = 2),
               r2 = format(summary(fit)$r.squared, digits = 2))
  eq <- if (intercept >= 0) {
    substitute(italic(y) == b %.% italic(x) + a *","~~italic(r)^2~"="~r2, subs)
  } else {
    substitute(italic(y) == b %.% italic(x) - a *","~~italic(r)^2~"="~r2, subs)
  }
  as.character(as.expression(eq))
}
#' Plot-ready label for a york regression line
#'
#' Builds a plotmath string "y == m %.% x +/- b" from a york slope and
#' intercept, both rounded to 4 decimal places. The intercept's sign is
#' folded into the displayed operator rather than printing "+ -b".
#'
#' @export
york_eqn <- function(slope, intercept){
  subs <- list(m = round(slope, 4),
               b = round(abs(intercept), 4))
  eq <- if (intercept >= 0) {
    substitute(italic(y) == m %.% italic(x) + b, subs)
  } else {
    substitute(italic(y) == m %.% italic(x) - b, subs)
  }
  as.character(as.expression(eq))
}
###########################
#' Emulate ggplot2's default discrete colour palette
#'
#' Returns `n` hex colours evenly spaced around the HCL colour wheel
#' (luminance 65, chroma 100), matching ggplot2's default hues.
#'
#' @param n number of colours wanted
#' @export
gg_color_hue <- function(n) {
  # n + 1 equally spaced hue angles; the last (375 deg) duplicates the first
  # (15 deg), so keep only the first n
  hue.angles <- seq(15, 375, length.out = n + 1)[seq_len(n)]
  hcl(h = hue.angles, l = 65, c = 100)
}
######################
## coding examples: expressions
#how to code strings of text, useful for using "parse" eg in geom_text
#ggplot(data.frame(x=1:3, y=1:3), aes(x,y)) + geom_point() + labs(x=expression(Delta^2 + a == x^2 + 5 ~ "hello" ~ sqrt(x) ~ "" ~ Delta), y=expression(Delta[47]^"wurst" ~ "C"))
######################
#math expressions that can be used in plots to create d18O min and fluid contours, if x is temp, and y is 18O
#e.g. fluid contour is y=0.97006*(((z+1000)*exp((((18.03*10^3)/(x+273.15))-32.42)/10^3))-1000)-29.94 for a given z (d18O fluid)
#e.g. 1.03091*(((z+1000)*1/(exp((((18.03*10^3)/(x+273.15))-32.42)/10^3)))-1000) + 30.91 for a given z (d18O rock)
#' Evaluate a two-argument function on a grid of x values and contour levels
#'
#' Expands `x` against each contour level `k`, evaluates `fun(x, k)` on the
#' full grid, and returns a long data frame ready for grouped plotting
#' (e.g. ggplot(..., aes(x, y, group = k))).
#'
#' @param x vector of x coordinates
#' @param k vector of contour levels
#' @param fun vectorised function of named arguments x and k
#' @return data frame with columns x, y = fun(x, k) and k, one row per pair
#' @export
contours <- function(x, k, fun) {
  grid.x <- rep(x, times = length(k))  # x cycles fastest
  grid.k <- rep(k, each = length(x))   # one block of x values per level
  data.frame(x = grid.x,
             y = fun(x = grid.x, k = grid.k),
             k = grid.k)
}
#example code for how to call the contour functions
# modelDF.fluid<-contours(
# x=seq(from=0, to=40, by=0.1),
# k=seq(from=-9, to=3, by=1),
# fun=function(x, k) 0.97006*(((k+1000)*exp((((18.03*10^3)/(x+273.15))-32.42)/10^3))-1000)-29.94)
# modelDF.rock<-contours(
# x=seq(from=20, to=60, by=0.1),
# k=seq(from=-9, to=3, by=1),
# fun=function(x, k) 1.03091*(((k+1000)*1/(exp((((18.03*10^3)/(x+273.15))-32.42)/10^3)))-1000) + 30.91)
#example code to plot the contours
#ggplot(modelDF, aes(x, y, group=k, label=k)) + geom_line(colour="red") + geom_text(data=subset(modelDF, x==-5), aes(y=y+0.3), colour="red")
# excel export ---------
#' Add a named worksheet containing `data` to an openxlsx workbook
#'
#' Thin wrapper around openxlsx's addWorksheet() + writeData(); returns the
#' workbook so calls can be chained.
#'
#' @param wb an openxlsx Workbook object
#' @param sheet worksheet name to create
#' @param data data frame (or other writeData-compatible object) to write
#' @return the workbook, for chaining
#' @export
add_ws_with_data <- function(wb, sheet, data) {
  addWorksheet(wb, sheet)
  writeData(wb, sheet=sheet, data)
  return(wb)
}
##########################################
# code for color-blind friendly palettes
#########################
.onLoad <- function(libname, pkgname) {
  # Package load hook: publish two colour-blind-friendly palettes for use in
  # scale_fill_manual()/scale_colour_manual().
  # NOTE(review): `<<-` here writes the palettes into the enclosing
  # environment rather than exporting documented data objects; consider
  # shipping them as package data instead — confirm downstream usage first.
  # The palette with grey:
  cbPalette <<- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
  # The palette with black:
  cbbPalette <<- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
}
# To use for fills, add
#scale_fill_manual(values=cbPalette)
# To use for line and point colors, add
#scale_colour_manual(values=cbPalette)
#######################################
#Function for defining legend size#
# Create a function with three arguments
# p : pre-defined plot
# legend_size : legend size, expressed as percentage of window width
# legend_text_size : font size in points
ggplot_size_legend <- function(p, legend_size=0.1, legend_text_size=10)
{
  # Print a ggplot with its legend drawn in a separate right-hand column.
  #
  # p                : a ggplot object
  # legend_size      : legend column width as a fraction of the window width
  # legend_text_size : legend font size in points
  #
  # NOTE(review): this uses opts(), which was removed from ggplot2 (replaced
  # by theme()); as written it errors on any modern ggplot2. Porting needs
  # theme(legend.position = "none") for `pl` and a legend-grob extraction
  # (e.g. via gtable) for `pn` — confirm before relying on this helper.
  # Two-column layout: plot area on the left, legend strip on the right
  Layout <- grid.layout(nrow = 1,
                        ncol = 2,
                        widths = unit(c(1-legend_size, legend_size),
                                      c("null", "null")),
                        heights = unit(1, "null"))
  vplayout <- function(...) {
    grid.newpage()
    pushViewport(viewport(layout = Layout))
  }
  subplot <- function(x, y) viewport(layout.pos.row = x,
                                     layout.pos.col = y)
  #create two plots, one with legend, one without
  pl <- p + opts(legend.position = "none")
  pn <- p + theme_grey(legend_text_size) + opts(keep = "legend_box")
  # print the plot into the two viewports
  vplayout()
  print(pl, vp = subplot(1, 1))
  print(pn, vp = subplot(1, 2))
}
######################
# expand data frames #
######################
# (same as expand grid but paramters can also be whole data frames)
expand.df <- function(...) {
  # expand.grid generalised to whole data frames: every argument (vector or
  # data frame) is coerced to a data frame, and the cartesian product of
  # their ROWS is returned with all columns bound side by side.
  args <- list(...)
  # coerce each argument; as.data.frame on the one-element sublist keeps its name
  dfs <- lapply(seq_along(args), function(i) {
    if (is.data.frame(args[[i]])) args[[i]] else as.data.frame(args[i])
  })
  n.dfs <- length(dfs)
  # grid of row indices; reversed so the FIRST argument varies slowest,
  # mirroring expand.grid's convention for its last factor
  row.grid <- do.call(expand.grid, lapply(rev(dfs), function(d) seq_len(nrow(d))))
  pieces <- lapply(seq_len(n.dfs), function(i) {
    dfs[[i]][row.grid[, n.dfs + 1 - i], , drop = FALSE]
  })
  do.call(cbind, pieces)
}
############################
# multiple legends display #
############################
#code from Seb to get ggplot legends to display correctly when using separate variables for multiple aesthetics, e.g. fill and shape are determines by different variable; note, this can often be better displayed with facet
# Legend styling helper for plots where fill and shape encode DIFFERENT
# variables: draws the fill legend as large white-bordered squares and the
# shape legend over a grey fill so both keys stay readable.
# Extra arguments in `...` are forwarded to both guide_legend() calls.
guides_fill_shape <- function(...){
  guides(fill = guide_legend(override.aes = list(colour = "white", size=8, shape = 22), ...),
         shape = guide_legend(override.aes = list(fill = "gray"), ...))
}
| /R/custom.functions.R | no_license | KopfLab/isoprocessCUBES | R | false | false | 16,521 | r | #' Multiple plot function
#'
#' ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
#' - cols: Number of columns in layout
#' - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#'
#' If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
#' then plot 1 will go in the upper left, 2 will go in the upper right, and
#' 3 will go all the way across the bottom.
#'
#' @export
# Arrange one or more ggplot objects on a single page (optionally a PDF).
# `...`/`plotlist` supply the plots; `layout` (a matrix of plot indices)
# overrides `cols`; `width`/`height` give the PDF page size in inches.
multiplot <- function(..., plotlist=NULL, file=NULL, cols=1, layout=NULL, width=8.5, height=11) {
  require(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page (open the PDF device first when a file is requested)
    if(!is.null(file)) pdf(file=file,width,height)
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
    if(!is.null(file)) dev.off()
  }
}
#############################################
# calculate means of subsets of a dataframe #
#############################################
#' Calculate means
#'
#' @export
# For each level of df$type, compute the mean of column `x_col` and its one-
# and two-sigma bands, then reshape to long form (type, n, linetype,
# yintercept) for drawing horizontal reference lines.
calc_means <- function(df, x_col) {
  # copy the column of interest to a fixed name so the NSE verbs can use it
  df$x <- df[[x_col]]
  df %>%
    group_by(type) %>%
    summarise(
      n = n(),
      mean = mean(x),
      # later expressions refer to the just-computed `mean` column
      `mean + sigma` = mean + sd(x),
      `mean - sigma` = mean - sd(x),
      `mean + 2 sigma` = mean + 2 * sd(x),
      `mean - 2 sigma` = mean - 2 * sd(x)
    ) %>%
    gather(linetype, yintercept, -type, -n) %>%
    filter(!is.na(yintercept))
}
#########################################
# functions for clumped data processing #
#########################################
############
### Calculating external errors after initial york regressions
###########
#' Calc ext err
#'
#' Description
#'
#' @param df this will usually be a datafram with heated gas data
#' @param mass either 47 or 48
#' @param slope york slope from the initial regression
#' @param intercept york intercept from the initial regression
#' @param N number of heated gases
#' @export
# Propagate scatter about the york line into "external" errors, appending
# d<mass>exterr and D<mass>exterr columns to df. `df` must contain columns
# d<mass>, D<mass>, d<mass>.sterr and D<mass>.sterr; N is the number of
# heated gases used in the regression.
calc_exterr<-function(df, mass, slope, intercept, N){
  # normalised residual of each point from the york line
  Chi.value<-(df[[paste0("D",mass)]]-(slope * df[[paste0("d",mass)]] + intercept))/df[[paste0("D",mass,".sterr")]]
  df[[paste0("d",mass,"exterr")]]<-sqrt(df[[paste0("d",mass,".sterr")]]*abs(sum(Chi.value))/(N-2))
  df[[paste0("D",mass,"exterr")]]<-sqrt(df[[paste0("D",mass,".sterr")]]*abs(sum(Chi.value))/(N-2))
  return(df)
}
###############
## calculating confidence and predictive intervals for york regressions
###############
#' York conf predictio interval
#'
#' Description
#'
#' @param reg.df dataframe that the york regression is based on - might be HG only or same as all data
#' @param all.df all the data of interest for constructing confidence intervals; might be same as reg.df
#' @param mass typically either 47 or 48
#' @param slope york slope
#' @param intercept york intercept
#' @param conf.level level of confidence interval, e.g. use 0.95 for 95\% confidence
#' @export
# Working-Hotelling simultaneous confidence and prediction bands around a
# york regression line, evaluated at all.df's small-delta values; returns
# all.df with the band half-widths and predicted big-Delta values appended.
york.conf.pred.interval<-function(reg.df, all.df, mass, slope, intercept, conf.level) {
  line.predicted.Y<-slope * reg.df[[paste0("d",mass)]] + intercept # fitted big-Delta at the regression points only
  W <- sqrt( 2 * qf(conf.level, 2, length(line.predicted.Y)-2) ) #the Working-Hotelling multiplier (simultaneous bands)
  SE.est.pred <- sqrt(sum((reg.df[[paste0("D",mass)]]-line.predicted.Y)^2)/(length(line.predicted.Y)-2))*sqrt(1+1/length(line.predicted.Y)+(all.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2/sum((reg.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2)) # prediction-band standard error (includes the "+1" for a new observation)
  SE.est.conf <- sqrt(sum((reg.df[[paste0("D",mass)]] - line.predicted.Y)^2)/(length(line.predicted.Y) - 2)) * sqrt(1/length(line.predicted.Y)+(all.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2/sum((reg.df[[paste0("d",mass)]]-mean(reg.df[[paste0("d",mass)]]))^2)) # confidence-band standard error for the mean response
  conf.int.df<-all.df
  conf.int.df[[paste0("D",mass,".conf.int")]] <- W*SE.est.conf # half-width of the confidence band
  conf.int.df[[paste0("D",mass,".pred.int")]] <- W*SE.est.pred # half-width of the prediction band
  conf.int.df[["all.predicted.Y"]]<-slope * all.df[[paste0("d",mass)]] + intercept # fitted line at every point, for plotting
  return(conf.int.df)
}
#####################
## calculate temps and se from D47 values
######################
#' Original Ghosh calibration, using CIT reference frame values
#' @export
# Inverts the Ghosh-style calibration D47 = 59200/T^2 - 0.02 (T in kelvin,
# Caltech reference frame); returns degrees C rounded to one decimal place.
convert_CIT.D47_to_CIT.Ghosh.temp <- function(D47) {
  round((59200/(D47+0.02))^0.5-273.15, 1)
}
#' Ghosh calibration in ARF ref frame, using ARF D47 values
#' @export
# Inverts D47 = 63600/T^2 - 0.0047 (T in kelvin) for D47 values in the
# absolute reference frame; returns degrees C rounded to one decimal place.
convert_ARF.D47_to_ARF.Ghosh.temp <- function(D47) {
  round((63600/(D47+0.0047))^0.5-273.15, 1)
}
#' Dennis Calibration in ARF ref frame, using ARF D47 values
#' @export
# Inverts D47 = 36200/T^2 + 0.292 (T in kelvin, absolute reference frame);
# only meaningful for D47 > 0.292. Returns degrees C rounded to one decimal.
convert_ARF.D47_to_ARF.Dennis.temp <- function(D47) {
  round((36200/(D47-0.292))^0.5-273.15, 1)
}
#' Calc D47 temp se
#' @export
# Standard error of a D47-derived temperature: error propagation from D47se
# using fixed calibration-covariance coefficients; rounded to one decimal.
calc_D47.Temp_se <- function(Temp, D47, D47se) {
  round(sqrt(abs((((Temp + 273.15)^6)/4)*(6.35164845416249E-13 + 2 * D47 * - 1.03937857088367E-12 +
                                            ((D47)^2) * 1.7284993474114E-12 + 0.0000169461014527562^2 * D47se^2))), 1)
}
#' Calc d18
#' @export
# Fluid d18O from mineral d18O (VPDB) and temperature (deg C): converts the
# mineral value to VSMOW, then removes the temperature-dependent
# mineral-water fractionation. Rounded to one decimal place.
calc_d18Ow <- function(d18Om, Temp) {
  round((((1.03091 * d18Om + 30.91) + 1000)/(exp((((18.03 * 10^3)/(Temp + 273.15)) - 32.42) / 10^3))) - 1000, 1)
}
#' Calc d18O
#' @export
# Standard error of the fluid d18O estimate: quadrature combination of the
# temperature contribution (dd18Ow/dT * Tempse) and the mineral-measurement
# contribution (dd18Ow/dd18Om * d18Omse); rounded to one decimal place.
calc_d18Owse <- function (d18Om, d18Omse, Temp, Tempse) {
  round(sqrt((((18.03 * ((1.03091 * d18Om + 30.91) + 1000) * exp(0.03242)) / (exp(18.03 / (Temp + 273.15)) * (Temp + 273.15)^2) * Tempse)^2) + ((exp(0.03242) / exp(18.03/(Temp + 273.15)) * d18Omse)^2)), 1)
}
#####################################
## York Regression in general form ##
#####################################
#' York reg general form
#'
#' Fits a York (errors-in-variables) straight line to data with
#' measurement errors in both x and y, using an iteratively reweighted
#' scheme seeded by an ordinary least-squares fit.
#'
#' @param X numeric vector of x values (for clumped isotopes, d47)
#' @param x.err per-point errors on X (stdev or sterr; weighted as 1/err^2)
#' @param Y numeric vector of y values (for clumped isotopes, D47)
#' @param y.err per-point errors on Y
#' @param error.corr correlation between x and y errors (0 = uncorrelated)
#' @return a named list of EVERY local variable in the function body,
#'   including york.slope, york.intercept, york.slopevar, york.intvar,
#'   york.fit (goodness of fit) and Cab (slope/intercept covariance term).
#' @export
york.regression<-function(X,x.err,Y,y.err,error.corr) #for clumps, X is d47, Y is D47, and the term "error.corr" is for how correlated the y.err are with the x.err - for my purposes, I usually use a value of 0
{ #opens the function, everything with one "<"in here is internal, two "<" means I can call the value by naming it
weightsX<-1/(x.err^2) #set up to input error rather than variance; could use either stdev or sterr in this depending on how your statistics are being used
weightsY<-1/(y.err^2)
alpha<-sqrt(weightsX*weightsY)
initial.slope<-(coef(lm(Y~X))[[2]]) #uses typical least squares linear model to calculate an initial slope to start the iterations
initial.intercept<-(coef(lm(Y~X))[[1]]) #uses typical least squares linear model to calculate an initial intercept to start the interations
# first pass: per-point weights, centred coordinates, and the beta terms
# of the York solution, evaluated at the OLS starting slope
Wi<-(weightsX*weightsY/(weightsX+initial.slope^2*weightsY-2*initial.slope*error.corr*alpha))
Ui<-X-(sum(Wi*X)/sum(Wi))
Vi<-Y-(sum(Wi*Y)/sum(Wi))
beta.i<-Wi*(Ui/weightsY+initial.slope*Vi/weightsX-(initial.slope*Ui+Vi)*error.corr/alpha)
york.slope<-sum(Wi*beta.i*Vi)/sum(Wi*beta.i*Ui)
york.intercept<-sum(Wi*Y)/sum(Wi)-york.slope*sum(Wi*X)/sum(Wi)
york.slope.temp<-numeric(1000) #defines the intermediate slope values, for each step of iteration. Neccesary if I want R to store these values so i can monitor the changes
york.int.temp<-numeric(1000) #defines the intermediate intercept values, for each step of iteration. Neccesary if I want to R to store these values so i can monitor the changes
# NOTE(review): the iteration count is fixed at 1000 with no convergence
# test; the stored york.slope.temp/york.int.temp histories can be used to
# confirm convergence after the fact.
for (i in 1:1000) { #defines loop for subsequent iterations, brackets open the part that contains the looped equations
Wi<-(weightsX*weightsY/(weightsX+york.slope^2*weightsY-2*york.slope*error.corr*alpha))
Ui<-X-(sum(Wi*X)/sum(Wi))
Vi<-Y-(sum(Wi*Y)/sum(Wi))
beta.i<-Wi*(Ui/weightsY+york.slope*Vi/weightsX-(york.slope*Ui+Vi)*error.corr/alpha)
york.slope<-sum(Wi*beta.i*Vi)/sum(Wi*beta.i*Ui) #this gives your final slope value
york.intercept<-sum(Wi*Y)/sum(Wi)-york.slope*sum(Wi*X)/sum(Wi) #final york intercept value
york.slope.temp[i]<-york.slope
york.int.temp[i]<-york.intercept
} #closes loop
# adjusted x positions and their centred form, used for the variance terms
xi<-sum(Wi*X)/sum(Wi)+beta.i
ui<-xi-sum(Wi*xi)/sum(Wi)
york.slopevar<-1/sum(Wi*ui^2) #variance of the slope
york.intvar<-1/sum(Wi)+(sum(Wi*xi)/sum(Wi))^2*york.slopevar #variance of the intercept
N<-length(X)
york.fit<-sum(Wi*(Y-york.slope*X-york.intercept)^2)/(N-2) #goodness of fit.
S<-sum(1/(x.err^2))
Sx<-sum(X/(x.err^2))
Sxx<-sum(X^2/(x.err^2))
Cab<- -Sx/(S*Sxx-(Sx^2)) #measure of covariance of slope and intecept, maybe lifted from Least squares approach?
# return every local variable as a named list; get(i, -1) resolves each
# name in this function's own evaluation frame
return(lapply(setNames(ls(), ls()), function(i) get(i, -1)))
} #closes the function. Everything after this runs the function or calls the results
########################################################
# functions for adding regression equations to ggplots #
# use with the geom_text(x=, y= label=lm_eqn(...)) #
########################################################
#' lm poly eq on plot
#' for just a second-order polynomial
#'
#' Fits y ~ x + x^2 and returns a plotmath character string of the fitted
#' equation plus r^2, for use with parse = TRUE in geom_text()/annotate().
#' @param x predictor vector
#' @param y response vector
#' @return character string containing the plotmath expression
#' @export
lm_poly2eqn = function(x,y){
  m = lm(y ~ poly(x,2, raw=TRUE)) #need raw=TRUE to get same coefficients as excel and same fit done by GGPLOT2
  # coef(m) is ordered (intercept, x, x^2); a/b/c below follow that order
  eq <- substitute(italic(y) == c %.% italic(x)^2 + b %.% italic(x) + a * "," ~ italic(r)^2 == r2,
                   list(a = format(coef(m)[1], digits = 2),
                        b = format(coef(m)[2], digits = 2),
                        c = format(coef(m)[3], digits = 2),
                        r2 = format(summary(m)$r.squared, digits = 2)))
  as.character(as.expression(eq))
}
# lm eqn. on plots
# for just a basic linear regression
# @export
# lm_eqn = function(x,y){
# m = lm(y ~ x)
# eq <- substitute(italic(y) == b %.% italic(x) + a * "," ~ italic(r)^2 == r2,
# list(a = format(coef(m)[1], digits = 2),
# b = format(coef(m)[2], digits = 2),
# r2 = format(summary(m)$r.squared, digits = 2)))
# as.character(as.expression(eq))
# }
#' lm eqn. on plots
#' form that deals with negatives better, normal linear regression
#' for just a basic linear regression
#'
#' Fits y ~ x and returns a plotmath character string of the fitted line
#' plus r^2. The intercept is formatted as abs(intercept) and the sign is
#' rendered explicitly as "+" or "-" so negative intercepts display as
#' "y = b*x - a" instead of "y = b*x + -a".
#' @param x predictor vector
#' @param y response vector
#' @return character string containing the plotmath expression
#' @export
lm_eqn = function(x,y) {
  m = lm(y ~ x)
  # a holds |intercept|; the sign branch below decides + vs -
  l <- list(a = format(abs(coef(m)[1]), digits = 2),
            b = format(coef(m)[2], digits = 2),
            r2 = format(summary(m)$r.squared, digits = 2));
  if (coef(m)[1] >= 0) {
    eq <- substitute(italic(y) == b %.% italic(x) + a *","~~italic(r)^2~"="~r2,l)
  } else {
    eq <- substitute(italic(y) == b %.% italic(x) - a *","~~italic(r)^2~"="~r2,l)
  }
  as.character(as.expression(eq));
}
#' plot eqns
#'
#' Builds a plotmath label ("y = m*x +/- b") from york regression results,
#' rounding both coefficients to 4 decimals and rendering the intercept's
#' sign explicitly so negative intercepts display as "- b", not "+ -b".
#' @param slope fitted slope
#' @param intercept fitted intercept
#' @return character string containing the plotmath expression
#' @export
york_eqn = function(slope, intercept){
  vals <- list(m = round(slope, 4),
               b = round(abs(intercept), 4))
  if (intercept < 0) {
    eq <- substitute(italic(y) == m %.% italic(x) - b, vals)
  } else {
    eq <- substitute(italic(y) == m %.% italic(x) + b, vals)
  }
  as.character(as.expression(eq));
}
###########################
#' function for ascribing a color from the wheel
#'
#' Reproduces ggplot2's default discrete colour scale: n hues evenly
#' spaced around the HCL colour wheel starting at 15 degrees. The
#' (n+1)th hue would duplicate the first, so it is dropped.
#' @param n number of colours wanted
#' @return character vector of n hex colours
#' @export
gg_color_hue <- function(n) {
  hue_angles <- seq(15, 375, length.out = n + 1)[seq_len(n)]
  hcl(h = hue_angles, l = 65, c = 100)
}
######################
## coding examples: expressions
#how to code strings of text, useful for using "parse" eg in geom_text
#ggplot(data.frame(x=1:3, y=1:3), aes(x,y)) + geom_point() + labs(x=expression(Delta^2 + a == x^2 + 5 ~ "hello" ~ sqrt(x) ~ "" ~ Delta), y=expression(Delta[47]^"wurst" ~ "C"))
######################
#math expressions that can be used in plots to create d18O min and fluid contours, if x is temp, and y is 18O
#e.g. fluid contour is y=0.97006*(((z+1000)*exp((((18.03*10^3)/(x+273.15))-32.42)/10^3))-1000)-29.94 for a given z (d18O fluid)
#e.g. 1.03091*(((z+1000)*1/(exp((((18.03*10^3)/(x+273.15))-32.42)/10^3)))-1000) + 30.91 for a given z (d18O rock)
#' contours
#'
#' Evaluates `fun` over every combination of `x` and contour level `k`
#' (x varying fastest) and returns a long data frame suitable for
#' plotting contour lines grouped by k.
#' @param x vector of x positions
#' @param k vector of contour levels
#' @param fun function taking named arguments x and k, vectorised over both
#' @return data.frame with columns x, y (= fun(x, k)) and k
#' @export
contours <- function(x, k, fun) {
  # expand.grid varies its first argument fastest, matching the original
  # rep(x, times = ...) / rep(k, each = ...) layout
  combos <- expand.grid(x = x, k = k)
  y_vals <- do.call(fun, args = list(x = combos$x, k = combos$k))
  data.frame(x = combos$x, y = y_vals, k = combos$k)
}
#example code for how to call the contour functions
# modelDF.fluid<-contours(
# x=seq(from=0, to=40, by=0.1),
# k=seq(from=-9, to=3, by=1),
# fun=function(x, k) 0.97006*(((k+1000)*exp((((18.03*10^3)/(x+273.15))-32.42)/10^3))-1000)-29.94)
# modelDF.rock<-contours(
# x=seq(from=20, to=60, by=0.1),
# k=seq(from=-9, to=3, by=1),
# fun=function(x, k) 1.03091*(((k+1000)*1/(exp((((18.03*10^3)/(x+273.15))-32.42)/10^3)))-1000) + 30.91)
#example code to plot the contours
#ggplot(modelDF, aes(x, y, group=k, label=k)) + geom_line(colour="red") + geom_text(data=subset(modelDF, x==-5), aes(y=y+0.3), colour="red")
# excel export ---------
#' add worksheet with data
#'
#' Adds a worksheet named `sheet` to a workbook and writes `data` into it.
#' Relies on addWorksheet()/writeData() -- presumably from openxlsx; the
#' package must already be attached by the caller (TODO confirm).
#' @param wb workbook object
#' @param sheet name of the new worksheet
#' @param data data to write into the new sheet
#' @return the modified workbook, for chaining
#' @export
add_ws_with_data <- function(wb, sheet, data) {
  addWorksheet(wb, sheet)
  writeData(wb, sheet=sheet, data)
  return(wb)
}
##########################################
# code for color-blind friendly palettes
#########################
# Package load hook: runs automatically when the package namespace loads.
.onLoad <- function(libname, pkgname) {
  # make global variables for palettes
  # NOTE(review): `<<-` here writes the palettes into the user's global
  # environment at load time; exported package data would be cleaner, but
  # downstream scripts may rely on these globals -- confirm before changing.
  # The palette with grey:
  cbPalette <<- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
  # The palette with black:
  cbbPalette <<- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
}
# To use for fills, add
#scale_fill_manual(values=cbPalette)
# To use for line and point colors, add
#scale_colour_manual(values=cbPalette)
#######################################
#Function for defining legend size#
# Create a function with three arguments
# p : pre-defined plot
# legend_size : legend size, expressed as percentage of window width
# legend_text_size : font size in points
#
# Prints `p` twice into a 1x2 grid viewport layout: the left cell holds
# the plot with its legend suppressed, the right cell holds only the
# legend, whose width is controlled by `legend_size`.
# NOTE(review): opts() was removed from ggplot2 (superseded by theme()),
# so this function will error under modern ggplot2 -- confirm which
# ggplot2 version this code targets before use.
ggplot_size_legend <- function(p, legend_size=0.1, legend_text_size=10)
{
  Layout <- grid.layout(nrow = 1,
                        ncol = 2,
                        widths = unit(c(1-legend_size, legend_size),
                                      c("null", "null")),
                        heights = unit(1, "null"))
  vplayout <- function(...) {
    grid.newpage()
    pushViewport(viewport(layout = Layout))
  }
  subplot <- function(x, y) viewport(layout.pos.row = x,
                                     layout.pos.col = y)
  #create two plots, one with legend, one without
  pl <- p + opts(legend.position = "none")
  pn <- p + theme_grey(legend_text_size) + opts(keep = "legend_box")
  # print the plot
  vplayout()
  print(pl, vp = subplot(1, 1))
  print(pn, vp = subplot(1, 2))
}
######################
# expand data frames #
######################
# (same as expand grid but paramters can also be whole data frames)
expand.df <- function(...) {
# convert all params to data.frames
l <- list(...)
dfs <- lapply(1:length(l), function(i) { if(is.data.frame(l[[i]])) l[[i]] else as.data.frame(l[i])})
# get indices grid
indices <- lapply(rev(dfs), function(df) seq_len(nrow(df)))
ind.grid <- do.call(expand.grid, indices)
#use subsetting and cbind
exp.dfs <- lapply(1:length(dfs), function(i) dfs[[i]][ind.grid[,length(dfs)+1-i], , drop = F])
do.call(cbind, exp.dfs)
}
############################
# multiple legends display #
############################
#code from Seb to get ggplot legends to display correctly when using separate variables for multiple aesthetics, e.g. fill and shape are determined by different variables; note, this can often be better displayed with facet
# Overrides the legend key glyphs so the fill legend shows large filled
# squares and the shape legend shows grey-filled symbols; extra arguments
# in `...` are forwarded to both guide_legend() calls.
guides_fill_shape <- function(...){
  guides(fill = guide_legend(override.aes = list(colour = "white", size=8, shape = 22), ...),
         shape = guide_legend(override.aes = list(fill = "gray"), ...))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listCoins.R
\name{listCoins}
\alias{listCoins}
\title{Retrieves name, symbol, slug and rank for all tokens}
\usage{
listCoins(coin = NULL, start_date = NULL, end_date = NULL)
}
\arguments{
\item{coin}{Name, symbol or slug of crypto currency}
\item{start_date}{Start date to retrieve data from, format yyyymmdd}
\item{end_date}{End date to retrieve data to, format yyyymmdd}
\item{...}{No arguments, return all coins}
}
\value{
Crypto currency historic OHLC market data in a dataframe:
\item{symbol}{Coin symbol (not-unique)}
\item{name}{Coin name}
\item{slug}{Coin URL slug (unique)}
\item{rank}{Current rank by market cap}
\item{url}{Historical market tables urls for scraping}
Required dependency that is used in function call \code{getCoins()}.
}
\description{
List all of the crypto currencies that have existed on Coinmarketcap
and use this to populate the URL base for scraping historical market
data. It retrieves name, slug, symbol and rank of cryptocurrencies from
CoinMarketCap and creates URLS for \code{scraper()} to use.
}
\examples{
# return specific coin
coin <- "kin"
coins <- listCoins(coin)
\dontrun{
# return all coins
coin_list <- listCoins()
}
}
| /man/listCoins.Rd | permissive | andrewcheryl/crypto | R | false | true | 1,267 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listCoins.R
\name{listCoins}
\alias{listCoins}
\title{Retrieves name, symbol, slug and rank for all tokens}
\usage{
listCoins(coin = NULL, start_date = NULL, end_date = NULL)
}
\arguments{
\item{coin}{Name, symbol or slug of crypto currency}
\item{start_date}{Start date to retrieve data from, format yyyymmdd}
\item{end_date}{End date to retrieve data to, format yyyymmdd}
\item{...}{No arguments, return all coins}
}
\value{
Crypto currency historic OHLC market data in a dataframe:
\item{symbol}{Coin symbol (not-unique)}
\item{name}{Coin name}
\item{slug}{Coin URL slug (unique)}
\item{rank}{Current rank by market cap}
\item{url}{Historical market tables urls for scraping}
Required dependency that is used in function call \code{getCoins()}.
}
\description{
List all of the crypto currencies that have existed on Coinmarketcap
and use this to populate the URL base for scraping historical market
data. It retrieves name, slug, symbol and rank of cryptocurrencies from
CoinMarketCap and creates URLS for \code{scraper()} to use.
}
\examples{
# return specific coin
coin <- "kin"
coins <- listCoins(coin)
\dontrun{
# return all coins
coin_list <- listCoins()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meshOffset.r
\name{meshOffset}
\alias{meshOffset}
\title{inflate a mesh along its normals}
\usage{
meshOffset(mesh, offset)
}
\arguments{
\item{mesh}{triangular mesh of class "mesh3d"}
\item{offset}{distance to translate the vertices}
}
\value{
returns modified mesh.
}
\description{
translate vertices of a triangular mesh along its normals
}
\examples{
require(Morpho)
data(nose)
offset <- meshOffset(shortnose.mesh,3)
\dontrun{
require(rgl)
shade3d(shortnose.mesh,col=3)
wire3d(offset)
}
}
\author{
Stefan Schlager
}
\keyword{~kwd1}
\keyword{~kwd2}
| /man/meshOffset.Rd | no_license | Celli119/mesheR | R | false | true | 633 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meshOffset.r
\name{meshOffset}
\alias{meshOffset}
\title{inflate a mesh along its normals}
\usage{
meshOffset(mesh, offset)
}
\arguments{
\item{mesh}{triangular mesh of class "mesh3d"}
\item{offset}{distance to translate the vertices}
}
\value{
returns modified mesh.
}
\description{
translate vertices of a triangular mesh along its normals
}
\examples{
require(Morpho)
data(nose)
offset <- meshOffset(shortnose.mesh,3)
\dontrun{
require(rgl)
shade3d(shortnose.mesh,col=3)
wire3d(offset)
}
}
\author{
Stefan Schlager
}
\keyword{~kwd1}
\keyword{~kwd2}
|
testlist <- list(A = structure(c(7.41069701149018e+79, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103532-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(7.41069701149018e+79, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' @title Minimo
#' @description Finds the minimum value of a continuous variable by a
#'   linear scan; NA values are removed before scanning.
#' @param x Numeric vector to scan.
#' @return The smallest non-NA value of \code{x} (NA if empty).
#' @examples
#' data("albahaca")
#' minimo(albahaca$produccion)
#' @export
minimo <- function(x) {
  x <- stats::na.omit(x)
  smallest <- x[1]
  for (value in x) {
    if (value <= smallest) {
      smallest <- value
    }
  }
  return(smallest)
}
| /R/minimo.R | no_license | ljofreflor/epg3308 | R | false | false | 361 | r | #' @title Minimo
#' @description Encuenta el minimo valor de una variable continua.
#' @param x Nombre de la variable.
#' @examples
#' data("albahaca")
#' minimo(albahaca$produccion)
#' @export
minimo<-function(x){
x<-stats::na.omit(x)
n<-length(x)
i<-2
min<-x[1]
while(i<=n){
if(x[i]<=min){min<-x[i]}
i<-i+1}
return(min)
}
|
testlist <- list(type = 0L, z = 1.39068541872724e-309)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609891410-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 0L, z = 1.39068541872724e-309)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
test_that("kernel must be a valid kernel",{
not_ker <- function(x){"x"}
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
lambda <- 1
subdivisions = 10L
expect_error(pco_method(not_ker,
samples = NULL,
H_n,
lambda,
subdivisions))
})
test_that("samples must be numeric with length greater than 0",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
H_n <- c(0.5, 0.8)
lambda <- 1
subdivisions = 10L
expect_error(pco_method(kernel,
samples = c(1, 2, 3, "a"),
H_n,
lambda,
subdivisions))
expect_error(pco_method(kernel,
samples = NULL,
H_n,
lambda,
subdivisions))
})
test_that("H_n must be numeric and within 1/length(samples) and 1",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
lambda <- 1
subdivisions = 10L
#length(H_n) > length(samples)
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, 0.7, 0.3, 0.4, 0.5),
lambda,
subdivisions))
#contains non-numeric value
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, "a"),
lambda,
subdivisions))
#length(H_n) = 0
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda,
subdivisions))
#contains value greater than 1
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, 1.2),
lambda,
subdivisions))
#contains value less than 1/length(samples)
expect_error(pco_method(kernel,
samples,
H_n = c(0.1, 0.6),
lambda,
subdivisions))
#contains value >= 0
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, -0.5),
lambda,
subdivisions))
})
test_that("lambda has to be numerical scalar",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
subdivisions <- 10L
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = "a",
subdivisions = 100L))
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = c(1,2),
subdivisions = 100L))
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = TRUE,
subdivisions = 100L))
})
test_that("subdivisions must be a numeric scalar",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
lambda <- 1
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = c(1L, 2L)))
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = "a"))
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = FALSE))
})
# NOTE(review): despite the test name ("pco") and the inline mention of
# goldenshluger_lepski below, this test actually exercises
# cross_validation() -- confirm which bandwidth selector was intended.
test_that("pco should return a smaller bandwidth for bigger samplesize", {
  set.seed(50)
  kernel <- epanechnikov
  # Custom density: 1 + sin(2*pi*x) on [0, 1], zero elsewhere
  f_den_eval <- function(x) {
    ret <- 1 + sin(2*pi*x)
    ret[x < 0 | 1 < x] <- 0
    ret
  }
  f_den <- Density(f_den_eval, c(0,1))
  g_den <- Density(dunif, c(0,1))
  # Create sampler from custom density (uniform proposal, constant 2)
  custom_sampler <- rejection_sampling(f_den, g_den, runif, 2)
  # Calculate goldenshluger_lepski bandwidth(kernel,
  bandwidth_50 <- cross_validation(kernel, custom_sampler(50), subdivisions = 250L)
  bandwidth_200 <- cross_validation(kernel, custom_sampler(200), subdivisions = 250L)
  bandwidth_500 <- cross_validation(kernel, custom_sampler(500), subdivisions = 250L)
  # bandwidth should shrink (weakly) as the sample size grows
  expect_false(bandwidth_500 > bandwidth_200)
  expect_true(bandwidth_50 > bandwidth_200)
})
| /tests/testthat/test-pco_method.R | permissive | hericks/KDE | R | false | false | 4,715 | r | test_that("kernel must be a valid kernel",{
not_ker <- function(x){"x"}
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
lambda <- 1
subdivisions = 10L
expect_error(pco_method(not_ker,
samples = NULL,
H_n,
lambda,
subdivisions))
})
test_that("samples must be numeric with length greater than 0",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
H_n <- c(0.5, 0.8)
lambda <- 1
subdivisions = 10L
expect_error(pco_method(kernel,
samples = c(1, 2, 3, "a"),
H_n,
lambda,
subdivisions))
expect_error(pco_method(kernel,
samples = NULL,
H_n,
lambda,
subdivisions))
})
test_that("H_n must be numeric and within 1/length(samples) and 1",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
lambda <- 1
subdivisions = 10L
#length(H_n) > length(samples)
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, 0.7, 0.3, 0.4, 0.5),
lambda,
subdivisions))
#contains non-numeric value
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, "a"),
lambda,
subdivisions))
#length(H_n) = 0
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda,
subdivisions))
#contains value greater than 1
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, 1.2),
lambda,
subdivisions))
#contains value less than 1/length(samples)
expect_error(pco_method(kernel,
samples,
H_n = c(0.1, 0.6),
lambda,
subdivisions))
#contains value >= 0
expect_error(pco_method(kernel,
samples,
H_n = c(0.6, -0.5),
lambda,
subdivisions))
})
test_that("lambda has to be numerical scalar",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
subdivisions <- 10L
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = "a",
subdivisions = 100L))
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = c(1,2),
subdivisions = 100L))
expect_error(pco_method(kernel,
samples,
H_n = NULL,
lambda = TRUE,
subdivisions = 100L))
})
test_that("subdivisions must be a numeric scalar",{
kernel <- Kernel(epanechnikov_function, c(-1,1))
samples <- c(1, 2, 3, 4)
H_n <- c(0.5, 0.8)
lambda <- 1
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = c(1L, 2L)))
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = "a"))
expect_error(pco_method(kernel,
samples,
H_n,
lambda = 1,
subdivisions = FALSE))
})
test_that("pco should return a smaller bandwidth for bigger samplesize", {
set.seed(50)
kernel <- epanechnikov
# Custom density
f_den_eval <- function(x) {
ret <- 1 + sin(2*pi*x)
ret[x < 0 | 1 < x] <- 0
ret
}
f_den <- Density(f_den_eval, c(0,1))
g_den <- Density(dunif, c(0,1))
# Create sampler from custom density
custom_sampler <- rejection_sampling(f_den, g_den, runif, 2)
# Calculate goldenshluger_lepski bandwidth(kernel,
bandwidth_50 <- cross_validation(kernel, custom_sampler(50), subdivisions = 250L)
bandwidth_200 <- cross_validation(kernel, custom_sampler(200), subdivisions = 250L)
bandwidth_500 <- cross_validation(kernel, custom_sampler(500), subdivisions = 250L)
expect_false(bandwidth_500 > bandwidth_200)
expect_true(bandwidth_50 > bandwidth_200)
})
|
sobig = data.frame(문장번호 = c(1,2,3,4,5,6,7,8,9,10,11:20,21,22),
사건번호 = c(1,1,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,6,6,7,7),
문장순서 = c(1,2,1,2,3,1,2,3,4,5,1,2,3,4,5,1,2,3,1,2,1,2),
문장내용 = c("abc","asdfwe","qwasd","asdda","zxceeeqq","asdasdaasd","ass","bdebfm",
"asdsee","qqwlb","ebvjnfknoj","pdobjipeniper","sdfsdf","iqwiwiww","asks","fjfji4",
"82t4g2hbei","9f23","vb" ,"isdjjd","f안녕","하세아어"),
각각불만유형 = c(2,5,4,4,6,1,5,1,1,1,5,7,2,1,1,1,9,5,7,1,1,1),
전체사건판정 = NA,
불만유형이름 = NA)
sobig$문장내용 = as.character(sobig$문장내용)
library(dplyr)
#nchar(sobig$문장내용)
#nstring = sobig %>% filter(사건번호 == sobig[12,]$사건번호) %>% group_by(각각불만유형) %>% summarise(nstring = sum(nchar(문장내용))) %>% arrange(desc(nstring))
#as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
for(i in 1:22) {
ncount = sobig %>%
filter(사건번호 == sobig[i,]$사건번호) %>%
group_by(각각불만유형) %>%
summarise(count = n()) %>%
arrange(desc(count))
nstring = sobig %>%
filter(사건번호 == sobig[i,]$사건번호) %>%
group_by(각각불만유형) %>%
summarise(nstring = sum(nchar(문장내용))) %>%
arrange(desc(nstring))
est = ifelse(as.integer(ncount[1,1]) == 1 ,
if(dim(ncount)[1] == 1){ ## 각각의 불만유형이 모두 1인 경우
as.integer(ncount[1,1])
}
else if(dim(ncount)[1] == 2){
as.integer(ncount[2,1])
}
else{
if(as.integer(ncount[2,2]) == as.integer(ncount[3,2])) {
as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
}
else as.integer(ncount[2,1])
} ,
if(dim(ncount)[1] == 1) {
as.integer(ncount[1,1])
}
else{
if(as.integer(ncount[1,2]) == as.integer(ncount[2,2])) {
as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
}
else as.integer(ncount[1,1])
})
sobig$전체사건판정[i] = est
} | /considered_auto_fill.R | no_license | HyeonjongPark/kca | R | false | false | 2,411 | r | sobig = data.frame(문장번호 = c(1,2,3,4,5,6,7,8,9,10,11:20,21,22),
사건번호 = c(1,1,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,6,6,7,7),
문장순서 = c(1,2,1,2,3,1,2,3,4,5,1,2,3,4,5,1,2,3,1,2,1,2),
문장내용 = c("abc","asdfwe","qwasd","asdda","zxceeeqq","asdasdaasd","ass","bdebfm",
"asdsee","qqwlb","ebvjnfknoj","pdobjipeniper","sdfsdf","iqwiwiww","asks","fjfji4",
"82t4g2hbei","9f23","vb" ,"isdjjd","f안녕","하세아어"),
각각불만유형 = c(2,5,4,4,6,1,5,1,1,1,5,7,2,1,1,1,9,5,7,1,1,1),
전체사건판정 = NA,
불만유형이름 = NA)
sobig$문장내용 = as.character(sobig$문장내용)
library(dplyr)
#nchar(sobig$문장내용)
#nstring = sobig %>% filter(사건번호 == sobig[12,]$사건번호) %>% group_by(각각불만유형) %>% summarise(nstring = sum(nchar(문장내용))) %>% arrange(desc(nstring))
#as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
for(i in 1:22) {
ncount = sobig %>%
filter(사건번호 == sobig[i,]$사건번호) %>%
group_by(각각불만유형) %>%
summarise(count = n()) %>%
arrange(desc(count))
nstring = sobig %>%
filter(사건번호 == sobig[i,]$사건번호) %>%
group_by(각각불만유형) %>%
summarise(nstring = sum(nchar(문장내용))) %>%
arrange(desc(nstring))
est = ifelse(as.integer(ncount[1,1]) == 1 ,
if(dim(ncount)[1] == 1){ ## 각각의 불만유형이 모두 1인 경우
as.integer(ncount[1,1])
}
else if(dim(ncount)[1] == 2){
as.integer(ncount[2,1])
}
else{
if(as.integer(ncount[2,2]) == as.integer(ncount[3,2])) {
as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
}
else as.integer(ncount[2,1])
} ,
if(dim(ncount)[1] == 1) {
as.integer(ncount[1,1])
}
else{
if(as.integer(ncount[1,2]) == as.integer(ncount[2,2])) {
as.integer(nstring[nstring$각각불만유형 != 1,][1,1])
}
else as.integer(ncount[1,1])
})
sobig$전체사건판정[i] = est
} |
library(ggplot2)
# Scatter of displacement vs highway mpg, one point shape per vehicle class
ggplot(data = mpg, aes(x = displ, y = hwy)) +
  geom_point(mapping = aes(shape=class))
# Two aesthetics mapped at once: point size by class, colour by fuel type
ggplot(data = mpg, aes(x = displ, y = hwy)) +
  geom_point(mapping = aes(size=class, color = fl))
# Constant colour set outside aes(): every point is blue, no legend
ggplot(data = mpg,aes(x = displ, y = hwy)) +
  geom_point(color = "blue")
# Axes swapped (y = displ, x = hwy); continuous colour scale from cty
ggplot(data = mpg, aes(y = displ, x = hwy)) +
  geom_point(mapping = aes(color = cty))
# Colour mapped to a logical condition (displ < 5): two-level legend
ggplot(data = mpg, aes(y = displ, x = hwy)) +
  geom_point(mapping = aes(colour = displ < 5))
# One scatter panel per manufacturer
ggplot(data = mpg, aes(x = displ, y = hwy)) +
  geom_point() +
  facet_wrap(~ manufacturer)
# Facetted scatter coloured by class, explicit grey theme
ggplot(data = mpg, aes(x = displ, y = hwy, color = class)) +
  geom_point() +
  facet_wrap(~ manufacturer) +
  theme_grey()
# Distribution of hwy per drive type as violin plots
ggplot(mpg, aes(x = drv, y = hwy)) +
  geom_violin()
# Bar chart of fuel-type counts
# NOTE(review): stat_density() stacked onto geom_bar() over a discrete x
# looks unintended and may error -- confirm what was meant here.
ggplot(data = mpg, aes(x = fl)) +
  geom_bar() +
  stat_density()
| /ggplotintro.R | no_license | vermashresth/data-viz | R | false | false | 807 | r | library(ggplot2)
ggplot(data = mpg, aes(x = displ, y = hwy)) +
geom_point(mapping = aes(shape=class))
ggplot(data = mpg, aes(x = displ, y = hwy)) +
geom_point(mapping = aes(size=class, color = fl))
ggplot(data = mpg,aes(x = displ, y = hwy)) +
geom_point(color = "blue")
ggplot(data = mpg, aes(y = displ, x = hwy)) +
geom_point(mapping = aes(color = cty))
ggplot(data = mpg, aes(y = displ, x = hwy)) +
geom_point(mapping = aes(colour = displ < 5))
ggplot(data = mpg, aes(x = displ, y = hwy)) +
geom_point() +
facet_wrap(~ manufacturer)
ggplot(data = mpg, aes(x = displ, y = hwy, color = class)) +
geom_point() +
facet_wrap(~ manufacturer) +
theme_grey()
ggplot(mpg, aes(x = drv, y = hwy)) +
geom_violin()
ggplot(data = mpg, aes(x = fl)) +
geom_bar() +
stat_density()
|
# Demo script: compare population vs sample means of store demographics,
# then randomly split stores into test/control markets and compare them.
# NOTE(review): hard-coded personal working directory -- will only run on
# the original author's machine.
setwd("C:\\Users\\Sherif\\Google Drive\\NYU\\4 Fall 14\\D3M\\R Data")
store_demo <- read.csv("store_demo.csv")
###############################################################################################
# Random Sampling
##############################################################################################
#check population means for couple of variables as check
mean(store_demo$capita_inc)
mean(store_demo$Share_Camp)
# Generate a random sample
library(dplyr)
set.seed(3)
# single draw discards the first RNG value after seeding
rnorm(1)
samp1<-sample_n(store_demo,500)
# Check means in your sample
mean(samp1$capita_inc)
mean(samp1$Share_Camp)
################################################################################################
# Divide stores into Test & Control Markets
###############################################################################################
# Create an Index to divide stores in 2 Groups
index = sample(1:nrow(store_demo), size=0.5*nrow(store_demo))
# Test & Control Stores
test = store_demo[index,]
control =store_demo[-index,]
# Check means in test & control markets
mean(test$capita_inc)
mean(control$capita_inc)
mean(test$Share_Camp)
mean(control$Share_Camp)
# Add a column to test & control
test["type"]<-"test"
control["type"]<-"control"
# Stack both data into file (on top of eachother)
# NOTE(review): rbind_list() is deprecated in modern dplyr (use bind_rows)
both<-rbind_list(test, control)
# Shares of Campbell by REGION as box plot
# NOTE(review): ggplot() is used below but ggplot2 is never attached in
# this script -- assumes it was loaded by the session; confirm.
ggplot(both, aes(x=type, y=Share_Camp, fill=type))+geom_boxplot()
ggplot(both, aes(x=type, y=capita_inc, fill=type))+geom_boxplot()
summarise(group_by(both, type),mean=mean(Share_Camp), sd=sd(Share_Camp))
| /R Code/Random_sampling.R | no_license | Shero83/R-Practice | R | false | false | 1,656 | r |
setwd("C:\\Users\\Sherif\\Google Drive\\NYU\\4 Fall 14\\D3M\\R Data")
store_demo <- read.csv("store_demo.csv")
###############################################################################################
# Random Sampling
##############################################################################################
#check population means for couple of variables as check
mean(store_demo$capita_inc)
mean(store_demo$Share_Camp)
# Generate a random sample
library(dplyr)
set.seed(3)
rnorm(1)
samp1<-sample_n(store_demo,500)
# Check means in your sample
mean(samp1$capita_inc)
mean(samp1$Share_Camp)
################################################################################################
# Divide stores into Test & Control Markets
###############################################################################################
# Create an Index to divide stores in 2 Groups
index = sample(1:nrow(store_demo), size=0.5*nrow(store_demo))
# Test & Control Stores
test = store_demo[index,]
control =store_demo[-index,]
# Check means in test & control markets
mean(test$capita_inc)
mean(control$capita_inc)
mean(test$Share_Camp)
mean(control$Share_Camp)
# Add a column to test & control
test["type"]<-"test"
control["type"]<-"control"
# Stack both data into file (on top of eachother)
both<-rbind_list(test, control)
# Shares of Campbell by REGION as box plot
ggplot(both, aes(x=type, y=Share_Camp, fill=type))+geom_boxplot()
ggplot(both, aes(x=type, y=capita_inc, fill=type))+geom_boxplot()
summarise(group_by(both, type),mean=mean(Share_Camp), sd=sd(Share_Camp))
|
cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr2'
firm_i <- 'snap-surveys-ltd'
d <- 3
ncpus <- 4
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## make MMC nets list
mmc <- lapply(nets, function(net) as.matrix(net %n% 'mmc'))
cpc <- lapply(nets, function(net) as.matrix(net %n% 'coop'))
cpp <- lapply(nets, function(net) as.matrix(net %n% 'coop_past'))
cpa <- lapply(nets, function(net) as.matrix(net %n% 'coop') + as.matrix(net %n% 'coop_past') )
cossim <- lapply(nets, function(net) as.matrix(net %n% 'cat_cos_sim'))
centjoin <- lapply(nets, function(net) as.matrix(net %n% 'joint_cent_pow_n0_4'))
centratio <- lapply(nets, function(net) as.matrix(net %n% 'cent_ratio_pow_n0_4'))
shcomp <- lapply(nets, function(net) as.matrix(net %n% 'shared_competitor'))
shinv <- lapply(nets, function(net) as.matrix(net %n% 'shared_investor_nd'))
####################### DEFINE MODELS ###################################
m4_7cycle <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
nodematch("ipo_status", diff = F) +
nodematch("state_code", diff = F) +
nodecov("age") + absdiff("age") +
##nodecov("employee_na_age") +
##nodecov("sales_na_0_mn") +
edgecov(cossim) +
edgecov(centjoin) +
##edgecov(shcomp) +
edgecov(shinv) +
edgecov(mmc) +
##edgecov(cpa) +
##edgecov(cpc) +
##edgecov(cpp) +
memory(type = "stability", lag = 1) +
timecov(transform = function(t) t) +
nodecov("genidx_multilevel") +
nodecov("cent_pow_n0_4") + absdiff("cent_pow_n0_4") +
cycle(3) + cycle(4) + cycle(5) + cycle(6) + cycle(7)
################################ end models#######################
##
# DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4_7cycle'
##
# SET RESAMPLES
##
R <- 500
## RUN TERGM
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 2, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
## statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
| /R/amj_rnr2/awareness_AMJ_RNR_TERGM_m4-snap-surveys-ltd-7cycle.R | no_license | sdownin/compnet-venus | R | false | false | 3,021 | r | cat('\n\n');timestamp();cat('\n')
library(btergm)
library(parallel)
library(texreg)
data_dir <- '/home/sdowning/data/firm_nets_rnr2'
firm_i <- 'snap-surveys-ltd'
d <- 3
ncpus <- 4
parallel <- "multicore"
data_file <- file.path(data_dir,sprintf('%s_d%s.rds',firm_i,d))
nets <- readRDS(data_file)
nPeriods <- 11 ## 5
if (!("fits" %in% ls())) fits <- list()
if (!(firm_i %in% names(fits)) ) fits[[firm_i]] <- list()
if (nPeriods < length(nets)) nets <- nets[(length(nets)-nPeriods+1):length(nets)]
cat("\n------------ estimating TERGM for:",firm_i,'--------------\n')
cat(sprintf("Using %s cores\n", detectCores()))
## make MMC nets list
mmc <- lapply(nets, function(net) as.matrix(net %n% 'mmc'))
cpc <- lapply(nets, function(net) as.matrix(net %n% 'coop'))
cpp <- lapply(nets, function(net) as.matrix(net %n% 'coop_past'))
cpa <- lapply(nets, function(net) as.matrix(net %n% 'coop') + as.matrix(net %n% 'coop_past') )
cossim <- lapply(nets, function(net) as.matrix(net %n% 'cat_cos_sim'))
centjoin <- lapply(nets, function(net) as.matrix(net %n% 'joint_cent_pow_n0_4'))
centratio <- lapply(nets, function(net) as.matrix(net %n% 'cent_ratio_pow_n0_4'))
shcomp <- lapply(nets, function(net) as.matrix(net %n% 'shared_competitor'))
shinv <- lapply(nets, function(net) as.matrix(net %n% 'shared_investor_nd'))
####################### DEFINE MODELS ###################################
m4_7cycle <- nets ~ edges + gwesp(0, fixed = T) + gwdegree(0, fixed=T) +
nodematch("ipo_status", diff = F) +
nodematch("state_code", diff = F) +
nodecov("age") + absdiff("age") +
##nodecov("employee_na_age") +
##nodecov("sales_na_0_mn") +
edgecov(cossim) +
edgecov(centjoin) +
##edgecov(shcomp) +
edgecov(shinv) +
edgecov(mmc) +
##edgecov(cpa) +
##edgecov(cpc) +
##edgecov(cpp) +
memory(type = "stability", lag = 1) +
timecov(transform = function(t) t) +
nodecov("genidx_multilevel") +
nodecov("cent_pow_n0_4") + absdiff("cent_pow_n0_4") +
cycle(3) + cycle(4) + cycle(5) + cycle(6) + cycle(7)
################################ end models#######################
##
# DEFINE MODEL and MODEL NAME TO COMPUTE
##
m_x <- 'm4_7cycle'
##
# SET RESAMPLES
##
R <- 500
## RUN TERGM
fits[[firm_i]][[m_x]] <- btergm(get(m_x), R=R, parallel = parallel, ncpus = ncpus)
## SAVE SERIALIZED
fits.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/fit_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
saveRDS(fits, file=fits.file)
## SAVE FORMATTED REGRESSION TABLE
html.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/%s_tergm_results_pd%s_R%s_%s.html', firm_i, nPeriods, R, m_x)
htmlreg(fits[[firm_i]], digits = 2, file=html.file)
#### SAVE GOODNESS OF FIT
##gf <- gof(fits[[firm_i]][[m_x]], nsim=1000,
## statistics=c(dsp, esp, deg, geodesic, rocpr, walktrap.modularity))
##gof.file <- sprintf('/home/sdowning/compnet/results/amj_rnr2/gof_%s_pd%s_R%s_%s.rds', firm_i, nPeriods, R, m_x)
##saveRDS(gf, file=gof.file)
cat('finished successfully.')
|
library(ggplot2)
library(ggthemes)
library(reshape2)
## Part I: BIC
## Builds a 4-panel boxplot figure comparing tuning methods (three BIC grid
## searches vs two quasi-Newton variants) across fractions of missing data,
## on runtime, BIC, and MSE over missing/observed entries.
## Manually adjust
#filename <- 'numerical_ex6_05-25-20-13'
filename <- 'numerical_ex6_08-25-20-54'
## Number of methods compared (= columns per results CSV).
n_grid_size <- 5
## Column order in which methods appear in the legend/boxplots.
method_order <- c(5,4,1,2,3,6)
## Fractions of missing entries; stored as a single row, one column per
## fraction, hence length() (= ncol for a data frame) below.
missing_fraction <- read.table(paste0(filename,'_setup_fraction.csv'),sep=',',header=FALSE)
n_missing_fraction <- length(missing_fraction)
## Accumulators: 0-row matrices with one extra column for the missing fraction.
mse_obs <- mse_missing <- bic <- runtime <- matrix(NA, 0, n_grid_size+1)
for (i in 1:n_missing_fraction) {
## Each metric lives in its own CSV per missing fraction; tag the rows with
## the fraction (as a factor) in the extra column, then stack.
d_runtime <- read.table(paste0(filename,'_runtime_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_runtime[,n_grid_size+1] <- as.double(missing_fraction[i])
d_runtime[,n_grid_size+1] <- as.factor(d_runtime[,n_grid_size+1])
runtime <- rbind(runtime, d_runtime)
d_bic <- read.table(paste0(filename,'_bic_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_bic[,n_grid_size+1] <- as.double(missing_fraction[i])
d_bic[,n_grid_size+1] <- as.factor(d_bic[,n_grid_size+1])
bic <- rbind(bic, d_bic)
d_mse_missing <- read.table(paste0(filename,'_mse_missing_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_missing[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_missing[,n_grid_size+1] <- as.factor(d_mse_missing[,n_grid_size+1])
mse_missing <- rbind(mse_missing, d_mse_missing)
d_mse_obs <- read.table(paste0(filename,'_mse_obs_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_obs[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_obs[,n_grid_size+1] <- as.factor(d_mse_obs[,n_grid_size+1])
mse_obs <- rbind(mse_obs, d_mse_obs)
}
## Method display labels; numbers in parentheses are grid sizes.
grid_size_labels <- c(paste0('BIC Grid (',c(10,19,37),')'), 'QN (E)','QN (H5)')
names(mse_obs) <- names(mse_missing) <- names(bic) <- names(runtime) <- c(grid_size_labels, 'Fraction Missing')
## Reorder method columns, then melt each table to long format for ggplot.
runtime <- runtime[,method_order]
runtime <- melt(runtime, id.vars='Fraction Missing')
runtime$value <- log10(runtime$value)
#q <- ggplot(data=runtime, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) + scale_y_log10()
bic <- bic[,method_order]
bic <- melt(bic, id.vars='Fraction Missing')
## Normalize BIC by n^2 (n = dimension of the simulated problem -- TODO confirm).
n <- 1e2
bic$value <- bic$value / (n**2)
#bic$value <- log10(bic$value)
#q <- ggplot(data=bic, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_missing <- mse_missing[,method_order]
mse_missing <- melt(mse_missing, id.vars='Fraction Missing')
#mse_missing$value <- log10(mse_missing$value)
#q <- ggplot(data=mse_missing, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_obs <- mse_obs[,method_order]
mse_obs <- melt(mse_obs, id.vars='Fraction Missing')
#mse_obs$value <- log10(mse_obs$value)
#q <- ggplot(data=mse_obs, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
## Create a single figures
## Tag each long table with its panel name, stack, and fix the panel order.
runtime$metric <- 'log10(Time in sec)'
bic$metric <- 'BIC'
mse_missing$metric <- 'MSE over missing entries'
mse_obs$metric <- 'MSE over observed entries'
df <- rbind(mse_obs,rbind(mse_missing,rbind(bic, runtime)))
df$metric <- ordered(df$metric, levels = c('log10(Time in sec)', 'BIC', 'MSE over missing entries', 'MSE over observed entries'))
q <- ggplot(data=df, aes(x=`Fraction Missing`, y=value))
q <- q + geom_boxplot(aes(fill=variable)) + scale_fill_brewer(guide = guide_legend(title=NULL),palette="RdYlBu")
## NOTE(review): ggsave() below saves the *last displayed* plot; if this
## script is source()'d, the faceted plot on the next line is never printed --
## confirm the script is run interactively, or pass `plot =` explicitly.
q + facet_wrap(~metric, scales='free_y') + ylab('') + theme(legend.text = element_text(size = 13), strip.text = element_text(size = 14), axis.text = element_text(size=14), axis.title=element_text(size=14))
golden_ratio <- 1.61803398875
height <- 7
filename <- 'numerical_ex6.pdf'
ggsave(filename, height=height, width=golden_ratio*height)
## Part II: AIC
## Mirrors Part I above with AIC in place of BIC.
## rm() clears Part I's objects; the packages attached above stay loaded.
rm(list=ls())
## Manually adjust
filename <- 'numerical_ex6_AIC_08-28-02-07'
## Number of methods compared (= columns per results CSV).
n_grid_size <- 5
## Column order in which methods appear in the legend/boxplots.
method_order <- c(5,4,1,2,3,6)
## Missing-data fractions: one row, one column per fraction (see Part I).
missing_fraction <- read.table(paste0(filename,'_setup_fraction.csv'),sep=',',header=FALSE)
n_missing_fraction <- length(missing_fraction)
mse_obs <- mse_missing <- aic <- runtime <- matrix(NA, 0, n_grid_size+1)
for (i in 1:n_missing_fraction) {
## Load per-fraction CSVs, tag with the fraction, and stack (as in Part I).
d_runtime <- read.table(paste0(filename,'_runtime_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_runtime[,n_grid_size+1] <- as.double(missing_fraction[i])
d_runtime[,n_grid_size+1] <- as.factor(d_runtime[,n_grid_size+1])
runtime <- rbind(runtime, d_runtime)
d_aic <- read.table(paste0(filename,'_aic_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_aic[,n_grid_size+1] <- as.double(missing_fraction[i])
d_aic[,n_grid_size+1] <- as.factor(d_aic[,n_grid_size+1])
aic <- rbind(aic, d_aic)
d_mse_missing <- read.table(paste0(filename,'_mse_missing_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_missing[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_missing[,n_grid_size+1] <- as.factor(d_mse_missing[,n_grid_size+1])
mse_missing <- rbind(mse_missing, d_mse_missing)
d_mse_obs <- read.table(paste0(filename,'_mse_obs_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_obs[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_obs[,n_grid_size+1] <- as.factor(d_mse_obs[,n_grid_size+1])
mse_obs <- rbind(mse_obs, d_mse_obs)
}
## Method display labels; numbers in parentheses are grid sizes.
grid_size_labels <- c(paste0('AIC Grid (',c(10,19,37),')'), 'QN (E)','QN (H5)')
names(mse_obs) <- names(mse_missing) <- names(aic) <- names(runtime) <- c(grid_size_labels, 'Fraction Missing')
## Reorder method columns, then melt each table to long format for ggplot.
runtime <- runtime[,method_order]
runtime <- melt(runtime, id.vars='Fraction Missing')
runtime$value <- log10(runtime$value)
#q <- ggplot(data=runtime, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) + scale_y_log10()
aic <- aic[,method_order]
aic <- melt(aic, id.vars='Fraction Missing')
## Normalize AIC by n^2 (n = dimension of the simulated problem -- TODO confirm).
n <- 1e2
aic$value <- aic$value / (n**2)
#aic$value <- log10(aic$value)
#q <- ggplot(data=aic, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_missing <- mse_missing[,method_order]
mse_missing <- melt(mse_missing, id.vars='Fraction Missing')
#mse_missing$value <- log10(mse_missing$value)
#q <- ggplot(data=mse_missing, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_obs <- mse_obs[,method_order]
mse_obs <- melt(mse_obs, id.vars='Fraction Missing')
#mse_obs$value <- log10(mse_obs$value)
#q <- ggplot(data=mse_obs, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
## Create a single figures
## Tag each long table with its panel name, stack, and fix the panel order.
runtime$metric <- 'log10(Time in sec)'
aic$metric <- 'AIC'
mse_missing$metric <- 'MSE over missing entries'
mse_obs$metric <- 'MSE over observed entries'
df <- rbind(mse_obs,rbind(mse_missing,rbind(aic, runtime)))
df$metric <- ordered(df$metric, levels = c('log10(Time in sec)', 'AIC', 'MSE over missing entries', 'MSE over observed entries'))
q <- ggplot(data=df, aes(x=`Fraction Missing`, y=value))
q <- q + geom_boxplot(aes(fill=variable)) + scale_fill_brewer(guide = guide_legend(title=NULL),palette="RdYlBu")
## NOTE(review): as in Part I, ggsave() saves the last *displayed* plot.
q + facet_wrap(~metric, scales='free_y') + ylab('') + theme(legend.text = element_text(size = 13), strip.text = element_text(size = 14), axis.text = element_text(size=14), axis.title=element_text(size=14))
golden_ratio <- 1.61803398875
height <- 7
filename <- 'numerical_ex6_AIC.pdf'
ggsave(filename, height=height, width=golden_ratio*height) | /Code/Figure5_plot.R | no_license | echi/IMS | R | false | false | 7,278 | r | library(ggplot2)
library(ggthemes)
library(reshape2)
## Part I: BIC
## Manually adjust
#filename <- 'numerical_ex6_05-25-20-13'
filename <- 'numerical_ex6_08-25-20-54'
n_grid_size <- 5
method_order <- c(5,4,1,2,3,6)
missing_fraction <- read.table(paste0(filename,'_setup_fraction.csv'),sep=',',header=FALSE)
n_missing_fraction <- length(missing_fraction)
mse_obs <- mse_missing <- bic <- runtime <- matrix(NA, 0, n_grid_size+1)
for (i in 1:n_missing_fraction) {
d_runtime <- read.table(paste0(filename,'_runtime_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_runtime[,n_grid_size+1] <- as.double(missing_fraction[i])
d_runtime[,n_grid_size+1] <- as.factor(d_runtime[,n_grid_size+1])
runtime <- rbind(runtime, d_runtime)
d_bic <- read.table(paste0(filename,'_bic_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_bic[,n_grid_size+1] <- as.double(missing_fraction[i])
d_bic[,n_grid_size+1] <- as.factor(d_bic[,n_grid_size+1])
bic <- rbind(bic, d_bic)
d_mse_missing <- read.table(paste0(filename,'_mse_missing_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_missing[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_missing[,n_grid_size+1] <- as.factor(d_mse_missing[,n_grid_size+1])
mse_missing <- rbind(mse_missing, d_mse_missing)
d_mse_obs <- read.table(paste0(filename,'_mse_obs_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_obs[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_obs[,n_grid_size+1] <- as.factor(d_mse_obs[,n_grid_size+1])
mse_obs <- rbind(mse_obs, d_mse_obs)
}
grid_size_labels <- c(paste0('BIC Grid (',c(10,19,37),')'), 'QN (E)','QN (H5)')
names(mse_obs) <- names(mse_missing) <- names(bic) <- names(runtime) <- c(grid_size_labels, 'Fraction Missing')
runtime <- runtime[,method_order]
runtime <- melt(runtime, id.vars='Fraction Missing')
runtime$value <- log10(runtime$value)
#q <- ggplot(data=runtime, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) + scale_y_log10()
bic <- bic[,method_order]
bic <- melt(bic, id.vars='Fraction Missing')
n <- 1e2
bic$value <- bic$value / (n**2)
#bic$value <- log10(bic$value)
#q <- ggplot(data=bic, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_missing <- mse_missing[,method_order]
mse_missing <- melt(mse_missing, id.vars='Fraction Missing')
#mse_missing$value <- log10(mse_missing$value)
#q <- ggplot(data=mse_missing, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_obs <- mse_obs[,method_order]
mse_obs <- melt(mse_obs, id.vars='Fraction Missing')
#mse_obs$value <- log10(mse_obs$value)
#q <- ggplot(data=mse_obs, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
## Create a single figures
runtime$metric <- 'log10(Time in sec)'
bic$metric <- 'BIC'
mse_missing$metric <- 'MSE over missing entries'
mse_obs$metric <- 'MSE over observed entries'
df <- rbind(mse_obs,rbind(mse_missing,rbind(bic, runtime)))
df$metric <- ordered(df$metric, levels = c('log10(Time in sec)', 'BIC', 'MSE over missing entries', 'MSE over observed entries'))
q <- ggplot(data=df, aes(x=`Fraction Missing`, y=value))
q <- q + geom_boxplot(aes(fill=variable)) + scale_fill_brewer(guide = guide_legend(title=NULL),palette="RdYlBu")
q + facet_wrap(~metric, scales='free_y') + ylab('') + theme(legend.text = element_text(size = 13), strip.text = element_text(size = 14), axis.text = element_text(size=14), axis.title=element_text(size=14))
golden_ratio <- 1.61803398875
height <- 7
filename <- 'numerical_ex6.pdf'
ggsave(filename, height=height, width=golden_ratio*height)
## Part II: AIC
rm(list=ls())
## Manually adjust
filename <- 'numerical_ex6_AIC_08-28-02-07'
n_grid_size <- 5
method_order <- c(5,4,1,2,3,6)
missing_fraction <- read.table(paste0(filename,'_setup_fraction.csv'),sep=',',header=FALSE)
n_missing_fraction <- length(missing_fraction)
mse_obs <- mse_missing <- aic <- runtime <- matrix(NA, 0, n_grid_size+1)
for (i in 1:n_missing_fraction) {
d_runtime <- read.table(paste0(filename,'_runtime_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_runtime[,n_grid_size+1] <- as.double(missing_fraction[i])
d_runtime[,n_grid_size+1] <- as.factor(d_runtime[,n_grid_size+1])
runtime <- rbind(runtime, d_runtime)
d_aic <- read.table(paste0(filename,'_aic_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_aic[,n_grid_size+1] <- as.double(missing_fraction[i])
d_aic[,n_grid_size+1] <- as.factor(d_aic[,n_grid_size+1])
aic <- rbind(aic, d_aic)
d_mse_missing <- read.table(paste0(filename,'_mse_missing_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_missing[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_missing[,n_grid_size+1] <- as.factor(d_mse_missing[,n_grid_size+1])
mse_missing <- rbind(mse_missing, d_mse_missing)
d_mse_obs <- read.table(paste0(filename,'_mse_obs_',missing_fraction[i],'.csv'),sep=',',header=FALSE)
d_mse_obs[,n_grid_size+1] <- as.double(missing_fraction[i])
d_mse_obs[,n_grid_size+1] <- as.factor(d_mse_obs[,n_grid_size+1])
mse_obs <- rbind(mse_obs, d_mse_obs)
}
grid_size_labels <- c(paste0('AIC Grid (',c(10,19,37),')'), 'QN (E)','QN (H5)')
names(mse_obs) <- names(mse_missing) <- names(aic) <- names(runtime) <- c(grid_size_labels, 'Fraction Missing')
runtime <- runtime[,method_order]
runtime <- melt(runtime, id.vars='Fraction Missing')
runtime$value <- log10(runtime$value)
#q <- ggplot(data=runtime, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) + scale_y_log10()
aic <- aic[,method_order]
aic <- melt(aic, id.vars='Fraction Missing')
n <- 1e2
aic$value <- aic$value / (n**2)
#aic$value <- log10(aic$value)
#q <- ggplot(data=aic, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_missing <- mse_missing[,method_order]
mse_missing <- melt(mse_missing, id.vars='Fraction Missing')
#mse_missing$value <- log10(mse_missing$value)
#q <- ggplot(data=mse_missing, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
mse_obs <- mse_obs[,method_order]
mse_obs <- melt(mse_obs, id.vars='Fraction Missing')
#mse_obs$value <- log10(mse_obs$value)
#q <- ggplot(data=mse_obs, aes(x=`Fraction Missing`, y=value))
#q + geom_boxplot(aes(fill=variable)) #+ scale_y_log10()
## Create a single figures
runtime$metric <- 'log10(Time in sec)'
aic$metric <- 'AIC'
mse_missing$metric <- 'MSE over missing entries'
mse_obs$metric <- 'MSE over observed entries'
df <- rbind(mse_obs,rbind(mse_missing,rbind(aic, runtime)))
df$metric <- ordered(df$metric, levels = c('log10(Time in sec)', 'AIC', 'MSE over missing entries', 'MSE over observed entries'))
q <- ggplot(data=df, aes(x=`Fraction Missing`, y=value))
q <- q + geom_boxplot(aes(fill=variable)) + scale_fill_brewer(guide = guide_legend(title=NULL),palette="RdYlBu")
q + facet_wrap(~metric, scales='free_y') + ylab('') + theme(legend.text = element_text(size = 13), strip.text = element_text(size = 14), axis.text = element_text(size=14), axis.title=element_text(size=14))
golden_ratio <- 1.61803398875
height <- 7
filename <- 'numerical_ex6_AIC.pdf'
ggsave(filename, height=height, width=golden_ratio*height) |
## Shiny app: GradCafe admission-decision explorer.
## Depends on Functions.R for the data objects referenced below
## (unique_institutions, years, decisions) and for the plotting/summary
## helpers (decision_calendar, first_acceptance, first_rejection,
## first_waitlist, first_interview).
library(shiny)
library(tidyverse)
library(plotly)
library(stringr)
library(forcats)
library(ggstance)
library(extrafont)
library(shinyjs)
library(shinyWidgets)
source("Functions.R")
# User interface ----
## Sidebar: school / year / decision-type filters, wrapped in div(id="form")
## so shinyjs::reset("form") can restore all inputs at once.
## Main panel: calendar plot plus "first decision of the season" summaries.
ui <- fluidPage(
tags$head(includeHTML(("site_tag.html"))),
# setBackgroundColor("#3d6594"),
useShinyjs(),
panel(),
sidebarLayout(
sidebarPanel(
div(
id = "form",
selectInput("Institutions",
label = "Which school are you applying to?",
choices = unique_institutions,
multiple = F,
selected = "Columbia University"),
checkboxGroupInput("years",
label = "Include results from:",
choices = years[[1]],
selected = c(2020, 2019),
inline = TRUE),
checkboxGroupInput("decisions",
label = "Include posts including:",
choices = decisions[[1]],
selected = c("Accepted", "Rejected"),
inline = TRUE),
actionButton("resetAll", "Reset all")
)
),
mainPanel(
# fluidRow(
# column(11,
# panel(
# p(strong("Disclaimer: "), "This project was conducted independently of the ", a("GradCafe", href = "https://www.thegradcafe.com/"), " team and any mistake is the author's. None of the information displayed comes from official sources."),
# p("Work in progress. Contact the author on ", a("Twitter", href = "https://twitter.com/MartinDevaux"), " or ", a("GitHub", href = "https://github.com/MartinDevaux"), ".")
# ))
# ),
fluidRow(
column(11,
panel(
plotlyOutput("calendar_viz")
))
),
fluidRow(
column(11,
panel(
p("Over the period:"),
textOutput("first_acceptance"),
textOutput("first_rejection"),
textOutput("first_interview"),
textOutput("first_waitlist")
))
))
)
)
# Server logic ----
## Each output is recomputed reactively from the three inputs via the helpers
## sourced from Functions.R; the reset button restores the sidebar form.
server <- function(input, output) {
output$calendar_viz <- renderPlotly({
decision_calendar(input$Institutions,
input$decisions,
input$years)
})
output$first_acceptance <- renderText({
first_acceptance(input$Institutions,
input$decisions,
input$years)
})
output$first_rejection <- renderText({
first_rejection(input$Institutions,
input$decisions,
input$years)
})
output$first_waitlist <- renderText({
first_waitlist(input$Institutions,
input$decisions,
input$years)
})
output$first_interview <- renderText({
first_interview(input$Institutions,
input$decisions,
input$years)
})
## Restore every input inside div(id = "form") to its initial value.
observeEvent(input$resetAll, {
reset("form")
})
}
# Run app ----
shinyApp(ui, server) | /app.R | no_license | MartinDevaux/GradCafe | R | false | false | 3,039 | r | library(shiny)
library(tidyverse)
library(plotly)
library(stringr)
library(forcats)
library(ggstance)
library(extrafont)
library(shinyjs)
library(shinyWidgets)
source("Functions.R")
# User interface ----
ui <- fluidPage(
tags$head(includeHTML(("site_tag.html"))),
# setBackgroundColor("#3d6594"),
useShinyjs(),
panel(),
sidebarLayout(
sidebarPanel(
div(
id = "form",
selectInput("Institutions",
label = "Which school are you applying to?",
choices = unique_institutions,
multiple = F,
selected = "Columbia University"),
checkboxGroupInput("years",
label = "Include results from:",
choices = years[[1]],
selected = c(2020, 2019),
inline = TRUE),
checkboxGroupInput("decisions",
label = "Include posts including:",
choices = decisions[[1]],
selected = c("Accepted", "Rejected"),
inline = TRUE),
actionButton("resetAll", "Reset all")
)
),
mainPanel(
# fluidRow(
# column(11,
# panel(
# p(strong("Disclaimer: "), "This project was conducted independently of the ", a("GradCafe", href = "https://www.thegradcafe.com/"), " team and any mistake is the author's. None of the information displayed comes from official sources."),
# p("Work in progress. Contact the author on ", a("Twitter", href = "https://twitter.com/MartinDevaux"), " or ", a("GitHub", href = "https://github.com/MartinDevaux"), ".")
# ))
# ),
fluidRow(
column(11,
panel(
plotlyOutput("calendar_viz")
))
),
fluidRow(
column(11,
panel(
p("Over the period:"),
textOutput("first_acceptance"),
textOutput("first_rejection"),
textOutput("first_interview"),
textOutput("first_waitlist")
))
))
)
)
# Server logic ----
server <- function(input, output) {
output$calendar_viz <- renderPlotly({
decision_calendar(input$Institutions,
input$decisions,
input$years)
})
output$first_acceptance <- renderText({
first_acceptance(input$Institutions,
input$decisions,
input$years)
})
output$first_rejection <- renderText({
first_rejection(input$Institutions,
input$decisions,
input$years)
})
output$first_waitlist <- renderText({
first_waitlist(input$Institutions,
input$decisions,
input$years)
})
output$first_interview <- renderText({
first_interview(input$Institutions,
input$decisions,
input$years)
})
observeEvent(input$resetAll, {
reset("form")
})
}
# Run app ----
shinyApp(ui, server) |
## plot1.R -- downloads the UCI "Individual household electric power
## consumption" data set, subsets it to 2007-02-01/02, and writes a histogram
## of Global Active Power to plot1.png (480x480 px, transparent background).

# Download the zip archive and read the data file straight out of it.
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
path <- file.path(getwd(), "hpc.zip")
download.file(url, path)
# FIX: the original passed `na="?"`, silently relying on partial matching of
# read.csv's `na.strings` argument; spell the argument name out.
household_power_consumption <- read.csv(unz(path, "household_power_consumption.txt"),
sep = ";", quote = "", na.strings = "?")
dim(household_power_consumption) #2075259 9
# Subset to the two days of interest (Date is stored as d/m/yyyy text).
hpc <- household_power_consumption[household_power_consumption$Date %in% c('1/2/2007', '2/2/2007'), ]
dim(hpc) #2880 9
# Plot1 building. Global_active_power is already numeric once "?" is treated
# as NA; keep the explicit coercion as a harmless safety net.
hpc$Global_active_power <- as.numeric(hpc$Global_active_power)
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
hist(hpc$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (Kilowatts)", breaks = 12, ylim = c(0, 1200))
dev.off()
| /plot1.R | no_license | IgorOnyshchenko/Exploratory_data_analysis_in_R_v1 | R | false | false | 758 | r | #Download data file
url<-'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
path<-file.path(getwd(),"hpc.zip")
download.file(url,path)
household_power_consumption<-read.csv( unz(path, "household_power_consumption.txt"), sep=";", quote="",na="?")
dim(household_power_consumption) #2075259 9
#subset data
hpc<-household_power_consumption[household_power_consumption$Date %in% c('1/2/2007','2/2/2007'),]
dim(hpc) #2880 9
#Plot1 building
hpc$Global_active_power<-as.numeric(hpc$Global_active_power)
png(filename="plot1.png", width=480, height=480, units="px", bg="transparent")
hist(hpc$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (Kilowatts)",breaks=12,ylim=c(0,1200))
dev.off()
|
#' ATP Match Odds Lookup Table
#'
#' This dataset links each betting-odds match record to the corresponding
#' players, tournament, and match in \code{atp_matches}.
#'
#' \itemize{
#' \item id. Character id for the match.
#' \item winner_id. Character id for the winning player, as in \code{atp_matches}.
#' \item loser_id. Character id for the losing player, as in \code{atp_matches}.
#' \item tourney_id. Character id for the tournament, as in \code{atp_matches}.
#' \item match_id. Character id for the match, as in \code{atp_matches}.
#'}
#'
#' @details If \code{match_id} is \code{NA}, the odds record could not be
#' matched to a row of \code{atp_matches}.
#' @format A data frame with 48,249 rows and 5 variables
#' @name atp_odds_match_lookup
NULL
#'
#' This dataset contains the match id and corresponding player and match id from the \code{atp_matches}.
#'
#' \itemize{
#' \item id. Character id for the match
#' \item winner_id. Character id for the winning player as in \code{atp_matches}.
#' \item loser_id. Character id for the losing player as in \code{atp_matches}.
#' \item tourney_id. Character id for the tournament as in \code{atp_matches}.
#' \item match_id. Character id for the match id as in \code{atp_matches}.
#'}
#'
#' @details If \code{match_id} is NA it means a certain match could not be found
#' @format A data frame with 48,249 rows and 5 variables
#' @name atp_odds_match_lookup
NULL |
## Attach the modelling packages used by this script.
## NOTE(review): the functions used below (makeClassifTask, makeLearner,
## tuneParams, makeTuneControlGrid, makeResampleDesc, ...) belong to the
## older 'mlr' package, not the 'mlr3' family loaded here -- confirm that
## 'mlr' is attached elsewhere, otherwise this setup is incomplete.
loading_algoritms <- function() {
library(mlr3)
library(mlr3learners)
library(party)
library(mlr3pipelines)
library(mlr3tuning)
}
#XGB classif.xgboost
#Random Forest classif.ranger
#Logistic regression classif.glmnet
#SVM classif.svm
# Cross-validation helper (mlr API): exhaustive grid search over `param_grid`
# for the given learner on `task`, scored with 5-fold cross-validation.
# Returns the tuneParams() result (tuned values live in `$x`).
cv_tuning <- function(task, classificator, param_grid) {
grid_ctrl <- makeTuneControlGrid()
cv_plan <- makeResampleDesc("CV", iters = 5L)
tuneParams(classificator, task = task, resampling = cv_plan,
par.set = param_grid, control = grid_ctrl)
}
## Train and tune four classifiers -- XGBoost, random forest, logistic
## regression, and SVM -- on one train/test split, using the mlr API.
##
## Args (comments translated from the original Polish):
##   target: name of the target column, present in both data frames.
##   data_encoding: full data set with categorical features encoded.
##   data_no_encoding: the same data set without encoding.
##   train_index / test_index: row indices defining the train/test split.
##   encoding_where_unnessesery: if TRUE, use the encoded data even for the
##     models (xgb, random forest) that could accept unencoded factors.
##
## Returns a list of test-set predictions in this order:
##   xgb, logistic regression, SVM, random forest.
## TODO: rewrite as an mlr pipeline to remove the duplicated task/if-else
## boilerplate.
learining <- function(target, data_encoding, data_no_encoding, train_index, test_index, encoding_where_unnessesery = TRUE) {
# Build one mlr classification task per data variant / split. (Translated:
# many tasks are created because no simpler arrangement was found.)
train_task_encoded = makeClassifTask(id = "train_task_encoded",
data = data_encoding[train_index, ],
target = target)
train_task_no_encoded = makeClassifTask(id = "train_task_no_encoded",
data = data_no_encoding[train_index, ],
target = target)
task_encoded = makeClassifTask(id = "task_encoded",
data = data_encoding,
target = target)
task_no_encoded = makeClassifTask(id = "task_no_encoded",
data = data_no_encoding,
target = target)
# Separate test tasks (translated: needed specifically for xgb, which does
# not follow the same conventions as the other learners).
test_task_encoded = makeClassifTask(id = "test_task_encoded",
data = data_encoding[test_index, ],
target = target)
test_task_no_encoded = makeClassifTask(id = "test_task_no_encoded",
data = data_no_encoding[test_index, ],
target = target)
# XGB
xgb_learner <- makeLearner(
"classif.xgboost",
predict.type = "response",
par.vals = list(objective = "binary:logistic")
)
# Hyperparameter grid for xgb.
discrete_ps = makeParamSet(
makeDiscreteParam("eta", values = c(0.01, 0.1, 0.2, 0.3, 0.5)),
makeDiscreteParam("gamma", values = c(0.1, 0.5, 1, 5)),
makeDiscreteParam('max_depth', values = c(5, 6, 7, 10, 12)),
makeDiscreteParam('subsample', values = c(0.4, 0.6, 0.8, 0.1))
)
# Tune on the encoded or unencoded training task, per the flag.
if (encoding_where_unnessesery) {
res_xgb <- cv_tuning(train_task_encoded, xgb_learner, discrete_ps)
} else {
res_xgb <- cv_tuning(train_task_no_encoded, xgb_learner, discrete_ps)
}
# Rebuild the learner with the tuned hyperparameters.
xgb_learner <- makeLearner(
"classif.xgboost",
predict.type = "response",
par.vals = list(
objective = "binary:logistic",
eta = res_xgb$x$eta,
gamma = res_xgb$x$gamma,
max_depth = res_xgb$x$max_depth,
subsample = res_xgb$x$subsample
)
)
# Training
if (encoding_where_unnessesery) {
xgb_model <- train(xgb_learner, train_task_encoded)
} else {
xgb_model <- train(xgb_learner, train_task_no_encoded)
}
# Result. NOTE(review): predict(...)$data[, 3] assumes the response is the
# 3rd column of the prediction data (id, truth, response) -- confirm.
if (encoding_where_unnessesery) {
xgb_result <- predict(xgb_model, test_task_encoded)$data[, 3]
} else {
xgb_result <- predict(xgb_model, test_task_no_encoded)$data[, 3]
}
# RANDOM FOREST (ranger)
discrete_ps = makeParamSet(
makeDiscreteParam("num.trees", values = c(50, 100, 200, 300, 400, 500)),
makeDiscreteParam('min.node.size',values = c(1,2,3,4,5))
#makeDiscreteParam('mtry', sample(seq(2, ncol(data_encoding)-1), 4))
#makeDiscreteParam('max.depth',values = c(0.5,1))
)
if (encoding_where_unnessesery){
res_rf <- cv_tuning(train_task_encoded,'classif.ranger', discrete_ps )}
else {
res_rf<- cv_tuning(train_task_no_encoded,'classif.ranger',discrete_ps)}
lerner_randomForest <- makeLearner(
"classif.ranger",
predict.type = "response",
par.vals = list(num.trees= res_rf$x$num.trees, min.node.size = res_rf$x$min.node.size)
)
if (encoding_where_unnessesery){
lerner_randomForest <- train(lerner_randomForest,train_task_encoded)}
else {
lerner_randomForest <- train(lerner_randomForest,train_task_no_encoded)}
if (encoding_where_unnessesery){
rf_results <- predict(lerner_randomForest,test_task_encoded)$data[,3]}
else {
rf_results <- predict(lerner_randomForest,test_task_no_encoded)$data[,3]}
# Logistic Regression (glmnet) -- always uses the encoded data, since glmnet
# requires numeric inputs.
logistic.learner <- makeLearner("classif.glmnet", predict.type = "response")
discrete_ps = makeParamSet(makeDiscreteParam("alpha", values = c(0, 0.2,0.4,0.6,0.8,1)),
makeDiscreteParam('nlambda', values = c(50, 100,150)))
res_lr <- cv_tuning(train_task_encoded, logistic.learner, discrete_ps)
logistic.learner <- makeLearner(
"classif.glmnet",
predict.type = "response",
par.vals = list(alpha = res_lr$x$alpha, nlambda = res_lr$x$nlambda)
)
lg_model <- train(logistic.learner, train_task_encoded)
lg_result <- predict(lg_model, test_task_encoded)$data[, 3]
# SVM (radial kernel) -- also restricted to the encoded data.
discrete_ps = makeParamSet(
makeDiscreteParam('gamma',values = c(0.001,0.01,0.1,0.3,0.6,0.8,1)),
makeDiscreteParam('kernel',values = c('radial'))
)
res_SVM <- cv_tuning(train_task_encoded,'classif.svm',discrete_ps)
lerner_SVM <- makeLearner(
"classif.svm",
predict.type = "response",
par.vals = list(gamma= res_SVM$x$gamma, kernel = res_SVM$x$kernel)
)
lerner_SVM <- train(lerner_SVM,train_task_encoded)
SVM_result <- predict(lerner_SVM,test_task_encoded)$data[,3]
return(list(xgb_result, lg_result, SVM_result, rf_results))
}
#TEST
# task <- OpenML::getOMLDataSet(data.id = 31)
# df <- task$data
# df_2 <- Filter(is.numeric, df)
# df_2$class <- df$class
#
# data <- TaskClassif$new(id = "test",
# backend = df_2,
# target = "class")
# tr <- sample(data$nrow, 0.8 * data$nrow)
# tst <- setdiff(seq_len(data$nrow), tr)
#
# r_test <- learining('class', df_2, df_2, train_index = tr, test_index = tst)
| /algorytmy.R | no_license | arctickey/WB_PD2 | R | false | false | 6,654 | r | loading_algoritms <- function() {
library(mlr3)
library(mlr3learners)
library(party)
library(mlr3pipelines)
library(mlr3tuning)
}
#XGB classif.xgboost
#Random Forest classif.ranger
#Logistic regression classif.glmnet
#SVM classif.svm
# Funkcja do cross validacij
cv_tuning <- function(task, classificator, param_grid) {
ctrl = makeTuneControlGrid()
rdesc = makeResampleDesc("CV", iters = 5L)
res = tuneParams(
classificator,
task = task,
resampling = rdesc,
par.set = param_grid,
control = ctrl
)
return(res)
}
learining <- function(target, data_encoding, data_no_encoding, train_index, test_index, encoding_where_unnessesery = TRUE) {
# chyba nie ma co tumaczyć
# dane mają zawierać zmienną celu i być nie kodowane
# podajemy ramke po kodowaniu i przed
# DO POPRAWIENIA NA PIPLINE JAK KTOŚ OGARNIE JAK TO DO KURWY ZROBIĆ
# Zwraca liste z predykcjami odpowiedni xgb,randomForest , regresja ,SVM
# Tworzenie w chuj tasków bo nie widzę prostrzego sposobu ponieważ R ssie
train_task_encoded = makeClassifTask(id = "train_task_encoded",
data = data_encoding[train_index, ],
target = target)
train_task_no_encoded = makeClassifTask(id = "train_task_no_encoded",
data = data_no_encoding[train_index, ],
target = target)
task_encoded = makeClassifTask(id = "task_encoded",
data = data_encoding,
target = target)
task_no_encoded = makeClassifTask(id = "task_no_encoded",
data = data_no_encoding,
target = target)
# Specjalnie dla xgb bo on pierdoli zasady
test_task_encoded = makeClassifTask(id = "test_task_encoded",
data = data_encoding[test_index, ],
target = target)
test_task_no_encoded = makeClassifTask(id = "test_task_no_encoded",
data = data_no_encoding[test_index, ],
target = target)
# XGB
xgb_learner <- makeLearner(
"classif.xgboost",
predict.type = "response",
par.vals = list(objective = "binary:logistic")
)
# parametry dla xgb
discrete_ps = makeParamSet(
makeDiscreteParam("eta", values = c(0.01, 0.1, 0.2, 0.3, 0.5)),
makeDiscreteParam("gamma", values = c(0.1, 0.5, 1, 5)),
makeDiscreteParam('max_depth', values = c(5, 6, 7, 10, 12)),
makeDiscreteParam('subsample', values = c(0.4, 0.6, 0.8, 0.1))
)
if (encoding_where_unnessesery) {
res_xgb <- cv_tuning(train_task_encoded, xgb_learner, discrete_ps)
} else {
res_xgb <- cv_tuning(train_task_no_encoded, xgb_learner, discrete_ps)
}
xgb_learner <- makeLearner(
"classif.xgboost",
predict.type = "response",
par.vals = list(
objective = "binary:logistic",
eta = res_xgb$x$eta,
gamma = res_xgb$x$gamma,
max_depth = res_xgb$x$max_depth,
subsample = res_xgb$x$subsample
)
)
# training
if (encoding_where_unnessesery) {
xgb_model <- train(xgb_learner, train_task_encoded)
} else {
xgb_model <- train(xgb_learner, train_task_no_encoded)
}
# Resoult
if (encoding_where_unnessesery) {
xgb_result <- predict(xgb_model, test_task_encoded)$data[, 3]
} else {
xgb_result <- predict(xgb_model, test_task_no_encoded)$data[, 3]
}
#RADNOM FOREST
discrete_ps = makeParamSet(
makeDiscreteParam("num.trees", values = c(50, 100, 200, 300, 400, 500)),
makeDiscreteParam('min.node.size',values = c(1,2,3,4,5))
#makeDiscreteParam('mtry', sample(seq(2, ncol(data_encoding)-1), 4))
#makeDiscreteParam('max.depth',values = c(0.5,1))
)
if (encoding_where_unnessesery){
res_rf <- cv_tuning(train_task_encoded,'classif.ranger', discrete_ps )}
else {
res_rf<- cv_tuning(train_task_no_encoded,'classif.ranger',discrete_ps)}
lerner_randomForest <- makeLearner(
"classif.ranger",
predict.type = "response",
par.vals = list(num.trees= res_rf$x$num.trees, min.node.size = res_rf$x$min.node.size)
)
if (encoding_where_unnessesery){
lerner_randomForest <- train(lerner_randomForest,train_task_encoded)}
else {
lerner_randomForest <- train(lerner_randomForest,train_task_no_encoded)}
if (encoding_where_unnessesery){
rf_results <- predict(lerner_randomForest,test_task_encoded)$data[,3]}
else {
rf_results <- predict(lerner_randomForest,test_task_no_encoded)$data[,3]}
# Logistic Regression
logistic.learner <- makeLearner("classif.glmnet", predict.type = "response")
discrete_ps = makeParamSet(makeDiscreteParam("alpha", values = c(0, 0.2,0.4,0.6,0.8,1)),
makeDiscreteParam('nlambda', values = c(50, 100,150)))
res_lr <- cv_tuning(train_task_encoded, logistic.learner, discrete_ps)
logistic.learner <- makeLearner(
"classif.glmnet",
predict.type = "response",
par.vals = list(alpha = res_lr$x$alpha, nlambda = res_lr$x$nlambda)
)
lg_model <- train(logistic.learner, train_task_encoded)
lg_result <- predict(lg_model, test_task_encoded)$data[, 3]
# SVM
discrete_ps = makeParamSet(
makeDiscreteParam('gamma',values = c(0.001,0.01,0.1,0.3,0.6,0.8,1)),
makeDiscreteParam('kernel',values = c('radial'))
)
res_SVM <- cv_tuning(train_task_encoded,'classif.svm',discrete_ps)
lerner_SVM <- makeLearner(
"classif.svm",
predict.type = "response",
par.vals = list(gamma= res_SVM$x$gamma, kernel = res_SVM$x$kernel)
)
lerner_SVM <- train(lerner_SVM,train_task_encoded)
SVM_result <- predict(lerner_SVM,test_task_encoded)$data[,3]
return(list(xgb_result, lg_result, SVM_result, rf_results))
}
#TEST
# task <- OpenML::getOMLDataSet(data.id = 31)
# df <- task$data
# df_2 <- Filter(is.numeric, df)
# df_2$class <- df$class
#
# data <- TaskClassif$new(id = "test",
# backend = df_2,
# target = "class")
# tr <- sample(data$nrow, 0.8 * data$nrow)
# tst <- setdiff(seq_len(data$nrow), tr)
#
# r_test <- learining('class', df_2, df_2, train_index = tr, test_index = tst)
|
#' Bagging with Classification Trees
#'
#' Fits the Bagging algorithm proposed by Breiman in 1996 using classification
#' trees as single classifiers.
#'
#' @param mfinal number of trees to use.
#' @param minsplit minimum number of observations that must exist in a node in
#' order for a split to be attempted.
#' @param minbucket minimum number of observations in any terminal node.
#' @param cp complexity parameter.
#' @param maxcompete number of competitor splits retained in the output.
#' @param maxsurrogate number of surrogate splits retained in the output.
#' @param usesurrogate how to use surrogates in the splitting process.
#' @param xval number of cross-validations.
#' @param surrogatestyle controls the selection of a best surrogate.
#' @param maxdepth maximum depth of any node of the final tree, with the root
#' node counted as depth 0.
#'
#' @details
#' \describe{
#' \item{Response types:}{\code{factor}}
#' \item{\link[=TunedModel]{Automatic tuning} of grid parameters:}{
#' \code{mfinal}, \code{maxdepth}
#' }
#' }
#'
#' Further model details can be found in the source link below.
#'
#' @return \code{MLModel} class object.
#'
#' @seealso \code{\link[adabag]{bagging}}, \code{\link{fit}},
#' \code{\link{resample}}
#'
#' @examples
#' \donttest{
#' ## Requires prior installation of suggested package adabag to run
#'
#' fit(Species ~ ., data = iris, model = AdaBagModel(mfinal = 5))
#' }
#'
# AdaBagModel: MachineShop wrapper around adabag::bagging() (Breiman 1996
# bagging with classification trees as base learners).
# `mfinal` is the number of bagged trees; every remaining argument is an
# rpart control option forwarded to the base trees via `control = list(...)`.
AdaBagModel <- function(
  mfinal = 100, minsplit = 20, minbucket = round(minsplit/3), cp = 0.01,
  maxcompete = 4, maxsurrogate = 5, usesurrogate = 2, xval = 10,
  surrogatestyle = 0, maxdepth = 30
) {
  MLModel(
    name = "AdaBagModel",
    label = "Bagging with Classification Trees",
    packages = "adabag",                # third-party package required to fit
    response_types = "factor",          # classification only
    predictor_encoding = "model.frame",
    na.rm = FALSE,
    # Snapshot of this call's arguments; do not introduce local variables
    # above this line or they would be captured as model parameters.
    params = new_params(environment()),
    # Automatic tuning grid: mfinal over roughly [1, 200] and maxdepth over
    # 1..min(n, 30) (round_int/seq_range are package helpers).
    gridinfo = new_gridinfo(
      param = c("mfinal", "maxdepth"),
      get_values = c(
        function(n, ...) round_int(seq_range(0, 25, c(1, 200), n + 1)),
        function(n, ...) seq_len(min(n, 30))
      )
    ),
    # `...` receives the remaining rpart control options from `params`.
    fit = function(formula, data, weights, mfinal, ...) {
      adabag::bagging(
        formula, data = as.data.frame(formula, data), mfinal = mfinal,
        control = list(...)
      )
    },
    # adabag's predict() returns a list; `$prob` is the class probability
    # matrix expected by MachineShop.
    predict = function(object, newdata, ...) {
      newdata <- as.data.frame(newdata)
      predict(object, newdata = newdata)$prob
    },
    # Variable importance as computed by adabag.
    varimp = function(object, ...) {
      object$importance
    }
  )
}
# Register the constructor as an MLModel function within the package.
MLModelFunction(AdaBagModel) <- NULL
| /R/ML_AdaBagModel.R | no_license | cran/MachineShop | R | false | false | 2,635 | r | #' Bagging with Classification Trees
#'
#' Fits the Bagging algorithm proposed by Breiman in 1996 using classification
#' trees as single classifiers.
#'
#' @param mfinal number of trees to use.
#' @param minsplit minimum number of observations that must exist in a node in
#' order for a split to be attempted.
#' @param minbucket minimum number of observations in any terminal node.
#' @param cp complexity parameter.
#' @param maxcompete number of competitor splits retained in the output.
#' @param maxsurrogate number of surrogate splits retained in the output.
#' @param usesurrogate how to use surrogates in the splitting process.
#' @param xval number of cross-validations.
#' @param surrogatestyle controls the selection of a best surrogate.
#' @param maxdepth maximum depth of any node of the final tree, with the root
#' node counted as depth 0.
#'
#' @details
#' \describe{
#' \item{Response types:}{\code{factor}}
#' \item{\link[=TunedModel]{Automatic tuning} of grid parameters:}{
#' \code{mfinal}, \code{maxdepth}
#' }
#' }
#'
#' Further model details can be found in the source link below.
#'
#' @return \code{MLModel} class object.
#'
#' @seealso \code{\link[adabag]{bagging}}, \code{\link{fit}},
#' \code{\link{resample}}
#'
#' @examples
#' \donttest{
#' ## Requires prior installation of suggested package adabag to run
#'
#' fit(Species ~ ., data = iris, model = AdaBagModel(mfinal = 5))
#' }
#'
# AdaBagModel: MachineShop wrapper around adabag::bagging() (Breiman 1996
# bagging with classification trees as base learners).
# `mfinal` is the number of bagged trees; every remaining argument is an
# rpart control option forwarded to the base trees via `control = list(...)`.
AdaBagModel <- function(
  mfinal = 100, minsplit = 20, minbucket = round(minsplit/3), cp = 0.01,
  maxcompete = 4, maxsurrogate = 5, usesurrogate = 2, xval = 10,
  surrogatestyle = 0, maxdepth = 30
) {
  MLModel(
    name = "AdaBagModel",
    label = "Bagging with Classification Trees",
    packages = "adabag",                # third-party package required to fit
    response_types = "factor",          # classification only
    predictor_encoding = "model.frame",
    na.rm = FALSE,
    # Snapshot of this call's arguments; do not introduce local variables
    # above this line or they would be captured as model parameters.
    params = new_params(environment()),
    # Automatic tuning grid: mfinal over roughly [1, 200] and maxdepth over
    # 1..min(n, 30) (round_int/seq_range are package helpers).
    gridinfo = new_gridinfo(
      param = c("mfinal", "maxdepth"),
      get_values = c(
        function(n, ...) round_int(seq_range(0, 25, c(1, 200), n + 1)),
        function(n, ...) seq_len(min(n, 30))
      )
    ),
    # `...` receives the remaining rpart control options from `params`.
    fit = function(formula, data, weights, mfinal, ...) {
      adabag::bagging(
        formula, data = as.data.frame(formula, data), mfinal = mfinal,
        control = list(...)
      )
    },
    # adabag's predict() returns a list; `$prob` is the class probability
    # matrix expected by MachineShop.
    predict = function(object, newdata, ...) {
      newdata <- as.data.frame(newdata)
      predict(object, newdata = newdata)$prob
    },
    # Variable importance as computed by adabag.
    varimp = function(object, ...) {
      object$importance
    }
  )
}
# Register the constructor as an MLModel function within the package.
MLModelFunction(AdaBagModel) <- NULL
|
# Plot a haplotype network for the sequences of one genus.
#
# alg_file:    path to a FASTA alignment.
# genus_name:  genus used to build the "Genus_taxon" labels.
# unique_name: regular expression selecting the taxa to keep.
# palette:     wesanderson palette name used to colour the pies.
#
# Returns NULL (invisibly draws on the current device); returns early with
# NULL when the network has fewer than two haplotypes.
build_haplotype_network <- function(alg_file, genus_name = "Opheodesoma",
                                    unique_name = "ESU1|ESU2",
                                    palette) {
  # Read the alignment and keep only sequences whose labels match the
  # requested taxa.
  aln <- ape::read.dna(alg_file, format = "fasta")
  aln <- aln[grepl(unique_name, dimnames(aln)[[1]]), ]
  seq_labels <- dimnames(aln)[[1]]

  # Collapse identical sequences into haplotypes and build the network.
  haps <- haplotype(aln)
  hap_net <- haploNet(haps)

  # Map every sequence to its haplotype, then reduce each label to
  # "Genus_taxon" so haplotype membership can be tabulated per taxon.
  membership <- stack(setNames(attr(haps, "index"), rownames(haps)))
  label_pattern <- paste0("^(", genus_name, ")_(", unique_name, ")", "_(.+)")
  membership$sp <- gsub(label_pattern, "\\1_\\2", seq_labels)
  counts <- table(membership$ind, membership$sp)

  pal <- wesanderson::wes_palette(palette, min(5, ncol(counts)))

  # A single-haplotype "network" is not worth drawing.
  if (dim(hap_net)[1] < 2) {
    return(NULL)
  }

  hap_sizes <- vapply(attr(haps, "index"), length, integer(1))
  plot(hap_net, size = hap_sizes, pie = counts, labels = FALSE,
       bg = pal, threshold = 0, fast = FALSE, scale.ratio = 2)
  legend("bottomleft", legend = colnames(counts), fill = pal)
}
# Build and save a haplotype-network SVG for every trimmed alignment of the
# given genera.  Alignments are expected under data/alignments/ and are
# matched per genus by the lower-cased genus name; output goes to networks/.
#
# genera:  character vector of genus names; genera[i] is paired with
#          species[i].
# species: per-genus regular expressions selecting the taxa to include.
all_networks <- function(genera = c("Euapta", "Opheodesoma"),
                         species = c("tahitiensis|godeffroyi|lappa",
                                     "ESU1|ESU2|giantRed|redsea")
                         ) {
  all_alg <- list.files(path = "data/alignments", pattern = "-trimmed.afa$")
  for (i in seq_along(genera)) {
    sub_alg <- grep(tolower(genera[i]), all_alg, value = TRUE)
    sapply(sub_alg, function(x) {
      message(x, appendLF = FALSE)
      # Skip the Euapta LSU/COI alignments.  FIX: the previous pattern
      # "euapta.+LSU|COI" skipped *any* file containing "COI", because "|"
      # binds weaker than concatenation; the group restricts the
      # alternation to the locus marker.  (Confirm Opheodesoma COI files
      # should indeed be processed.)
      if (grepl("euapta.+(LSU|COI)", x)) {
        message("... skipped ...")
        return(NULL)
      }
      # `filename` spelled out (the original relied on partial matching of
      # `file`).
      svg(filename = file.path("networks", gsub("-trimmed.afa", "-network.svg", x)),
          width = 10)
      on.exit(dev.off())  # close the device even if network building fails
      build_haplotype_network(file.path("data", "alignments", x),
                              genus_name = genera[i],
                              unique_name = species[i],
                              palette = if (genera[i] == "Euapta") "Royal2" else "Rushmore")
      message(" ... DONE.")
    })
  }
}
| /R/build_haplotypes.R | no_license | fmichonneau/opheodesoma | R | false | false | 1,965 | r | build_haplotype_network <- function(alg_file, genus_name = "Opheodesoma",
unique_name = "ESU1|ESU2",
palette) {
alg <- ape::read.dna(alg_file, format = "fasta")
alg <- alg[grepl(unique_name, dimnames(alg)[[1]]), ]
hap <- haplotype(alg)
net <- haploNet(hap)
pie <- stack(setNames(attr(hap, "index"), rownames(hap)))
reg_expr <- paste0("^(", genus_name, ")_(", unique_name, ")", "_(.+)")
pie$sp <- gsub(reg_expr, "\\1_\\2", dimnames(alg)[[1]])
tab <- table(pie$ind, pie$sp)
cols <- wesanderson::wes_palette(palette, min(5, ncol(tab)))
if (dim(net)[1] < 2) return(NULL)
plot(net, size = sapply(attr(hap, "index"), length), pie = tab, labels = FALSE,
bg = cols, threshold = 0, fast = FALSE, scale.ratio = 2)
legend("bottomleft", legend = colnames(tab), fill = cols)
}
# Build and save a haplotype-network SVG for every trimmed alignment of the
# given genera.  Alignments are expected under data/alignments/ and are
# matched per genus by the lower-cased genus name; output goes to networks/.
#
# genera:  character vector of genus names; genera[i] is paired with
#          species[i].
# species: per-genus regular expressions selecting the taxa to include.
all_networks <- function(genera = c("Euapta", "Opheodesoma"),
                         species = c("tahitiensis|godeffroyi|lappa",
                                     "ESU1|ESU2|giantRed|redsea")
                         ) {
  all_alg <- list.files(path = "data/alignments", pattern = "-trimmed.afa$")
  for (i in seq_along(genera)) {
    sub_alg <- grep(tolower(genera[i]), all_alg, value = TRUE)
    sapply(sub_alg, function(x) {
      message(x, appendLF = FALSE)
      # Skip the Euapta LSU/COI alignments.  FIX: the previous pattern
      # "euapta.+LSU|COI" skipped *any* file containing "COI", because "|"
      # binds weaker than concatenation; the group restricts the
      # alternation to the locus marker.  (Confirm Opheodesoma COI files
      # should indeed be processed.)
      if (grepl("euapta.+(LSU|COI)", x)) {
        message("... skipped ...")
        return(NULL)
      }
      # `filename` spelled out (the original relied on partial matching of
      # `file`).
      svg(filename = file.path("networks", gsub("-trimmed.afa", "-network.svg", x)),
          width = 10)
      on.exit(dev.off())  # close the device even if network building fails
      build_haplotype_network(file.path("data", "alignments", x),
                              genus_name = genera[i],
                              unique_name = species[i],
                              palette = if (genera[i] == "Euapta") "Royal2" else "Rushmore")
      message(" ... DONE.")
    })
  }
}
|
# Download the dataset built from the PHOIBLE database (Moran et al. 2014)
# and fit logistic regressions predicting whether a language has long
# vowels (variable have_long) from:
#   - the number of vowels (variable total)                          (MODEL 1)
#   - the number of vowels (total) and the part of the world (area)  (MODEL 2)
# Report the AIC of the better model.
# https://raw.githubusercontent.com/agricolamz/r_on_line_course_data/master/phoible_long_vowels.csv
# Expected answer: 942.9765
library(tidyverse)
vowels <- read_csv('datasets/phoible_long_vowels.csv')
model1 <- glm(
  data = vowels,
  formula = have_long ~ total,
  family = 'binomial'
)
model2 <- glm(
  data = vowels,
  formula = have_long ~ total + area,
  family = 'binomial'
)
result <- AIC(model1, model2)
# FIX: dplyr::filter() drops data.frame row names, so the original
# rownames(filter(result, AIC == min(AIC))) always printed "1" instead of
# the winning model's name.  Index the AIC table directly instead.
best <- which.min(result$AIC)
cat(rownames(result)[best], ":", result$AIC[best])
| /9/9.9.4.r | no_license | almaceleste/openedu-rling | R | false | false | 1,131 | r | # Скачайте датасет, построенный на основании базы данных PHOIBLE (Moran et al.
# 2014) и постройте логистическую регрессию, предсказывающую наличие в языке
# долгих гласных (переменная have_long), на основании
# - количества гласных (переменная total) (МОДЕЛЬ 1)
# - количества гласных (переменная total) и части света (переменная area)
# (МОДЕЛЬ 2)
# В ответе приведите AIC лучшей модели.
# https://raw.githubusercontent.com/agricolamz/r_on_line_course_data/master/phoible_long_vowels.csv
# 942.9765
library(tidyverse)
vowels <- read_csv('datasets/phoible_long_vowels.csv')
model1 <- glm(
  data = vowels,
  formula = have_long ~ total,
  family = 'binomial'
)
model2 <- glm(
  data = vowels,
  formula = have_long ~ total + area,
  family = 'binomial'
)
result <- AIC(model1, model2)
# FIX: dplyr::filter() drops data.frame row names, so the original
# rownames(filter(result, AIC == min(AIC))) always printed "1" instead of
# the winning model's name.  Index the AIC table directly instead.
best <- which.min(result$AIC)
cat(rownames(result)[best], ":", result$AIC[best])
|
# Model-comparison pipeline: for every target type, tune several learners
# on a sample, estimate the generalization gap, run an algorithm-comparison
# simulation, and fit a learning curve.  Plots are written to `plotloc`
# (defined in code/0_init.R).
rm(list = ls())  # NOTE(review): clears the whole workspace; acceptable only as a standalone script
# target types to be calculated
# -> REMOVE AND ADAPT AT APPROPRIATE LOCATIONS FOR A USE-CASE
TARGET_TYPES = c(
  "CLASS",
  "REGR",
  "MULTICLASS"
)
for (TARGET_TYPE in TARGET_TYPES) {
  cat(paste0("\n\n******************* Compare models with '", TARGET_TYPE, "' target. *******************\n\n"))
  tryCatch(
    {
      #TARGET_TYPE = "CLASS"
      # ==== Initialize ====----
      # Load result from exploration (provides df, features, formulas, ...)
      load(paste0("data/",TARGET_TYPE,"_1_explore.rdata"))
      # Load libraries and functions
      source("code/0_init.R")
      # Initialize parallel processing
      closeAllConnections() #reset
      Sys.getenv("NUMBER_OF_PROCESSORS")
      cl = makeCluster(4)
      registerDoParallel(cl)
      # stopCluster(cl); closeAllConnections() #stop cluster
      # Set metric for performance comparison
      metric = switch(TARGET_TYPE, "CLASS" = "AUC", "REGR" = "spearman", "MULTICLASS" = "AUC")
      classProbs = switch(TARGET_TYPE, "CLASS" = TRUE, "REGR" = FALSE, "MULTICLASS" = TRUE)
      # ==== Test an algorithm (and determine parameter grid) ====----
      # Sample data ----
      if (TARGET_TYPE %in% c("CLASS","MULTICLASS")) {
        # Sample from all data (take all but n_maxpersample at most)
        #c(df.tune, b_sample, b_all) %<-% (df %>% hmsPM::undersample_n(n_max_per_level = 5e3))
        # Undersample only training data
        c(df.tmp, b_sample, b_all) %<-% (df %>% filter(fold == "train") %>% undersample_n(n_max_per_level = 5e3))
        df.tune = bind_rows(df.tmp, df %>% filter(fold == "test"))
        summary(df.tune$target); b_sample; b_all
      }
      if (TARGET_TYPE == "REGR") {
        # Sample from all data
        df.tune = df %>% sample_n(min(nrow(.),5e3))
      }
      # Define some controls ----
      l.index = list(i = which(df.tune$fold == "train"))
      #set.seed(998)
      #l.index = list(i = sample(1:nrow(df.tune), floor(0.8*nrow(df.tune)))) #random sample
      # Index based test-set
      ctrl_idx = trainControl(method = "cv", number = 1, index = l.index,
                              returnResamp = "final", returnData = FALSE,
                              summaryFunction = hmsPM::performance_summary, classProbs = classProbs)
      # Dito but "fast" final fit: DO NOT USE in case of further application!!!
      ctrl_idx_fff = ctrl_idx
      ctrl_idx_fff$indexFinal = sample(1:nrow(df.tune), 100)  #"Fast" final fit!!!
      # Dito but without parallel processing: Needed for DeepLearn or H2o
      ctrl_idx_nopar_fff = ctrl_idx_fff
      ctrl_idx_nopar_fff$allowParallel = FALSE
      # FFF as 5-fold cv
      ctrl_cv_fff = trainControl(method = "cv", number = 5,
                                 returnResamp = "final", returnData = FALSE,
                                 summaryFunction = hmsPM::performance_summary, classProbs = classProbs,
                                 indexFinal = sample(1:nrow(df.tune), 100)) #"Fast" final fit!!!
      # Fits ----
      ## Overwritten Lasso / Elastic Net: Possible to use sparse matrix
      fit = train(x = sparse.model.matrix(as.formula(formula_binned),
                                          df.tune[c("target",features_binned)]),
                  y = df.tune$target,
                  method = glmnet_custom,
                  trControl = ctrl_idx_fff,
                  metric = metric,
                  tuneGrid = expand.grid(alpha = c(0,0.2,0.5,0.8,1),
                                         lambda = 2^(seq(5, -15, -2)))
                  #weights = exposure, family = "poisson"
      )
      #preProc = c("center","scale")) #no scaling needed due to dummy coding of all variables
      plot(fit)
      plot(fit, xlim = c(0,1))
      # -> keep alpha=1 to have a full Lasso
      ## Random Forest
      fit = train(x = df.tune[features],
                  y = df.tune$target,
                  #fit = train(x = model.matrix(as.formula(formula), df.tune[c("target",features)]), y = df.tune$target,
                  method = "ranger",
                  trControl = ctrl_idx_fff,
                  metric = metric,
                  tuneGrid = expand.grid(mtry = seq(1,length(features),10),
                                         splitrule = switch(TARGET_TYPE,
                                                            "CLASS" = "gini", "REGR" = "variance",
                                                            "MULTICLASS" = "gini") ,
                                         min.node.size = c(1,5,10)),
                  num.trees = 500) #use the Dots (...) for explicitly specifiying randomForest parameter
      plot(fit)
      # -> keep around the recommended values: mtry(class) = sqrt(length(features), mtry(regr) = 0.3 * length(features))
      ## Boosted Trees
      # Default xgbTree: no parallel processing possible with DMatrix (and using sparse matrix
      # will result in nonsparse trafo)
      # fit = train(x = xgb.DMatrix(sparse.model.matrix(as.formula(formula),
      #                                                 df.tune[c("target",features)])),
      #             y = df.tune$target,
      #             method = "xgbTree",
      #             trControl = ctrl_idx_nopar_fff, #no parallel for DMatrix
      #             metric = metric,
      #             tuneGrid = expand.grid(nrounds = seq(100,1100,200), eta = c(0.01),
      #                                    max_depth = c(3), min_child_weight = c(10),
      #                                    colsample_bytree = c(0.7), subsample = c(0.7),
      #                                    gamma = 0))
      # plot(fit)
      # Overwritten xgbTree: additional alpha and lambda parameter. Possible to use sparse matrix
      # and parallel processing
      fit = train(x = sparse.model.matrix(as.formula(formula),
                                          df.tune[c("target",features)]),
                  y = df.tune$target,
                  method = xgb_custom,
                  trControl = ctrl_idx_fff, #parallel for overwritten xgb
                  metric = metric,
                  tuneGrid = expand.grid(nrounds = seq(100,3100,200), eta = c(0.01),
                                         max_depth = c(3), min_child_weight = c(10),
                                         colsample_bytree = c(0.7), subsample = c(0.7),
                                         gamma = 0, alpha = 0, lambda = 1))
      plot(fit)
      hmsPM::plot_caret_result(fit = fit, metric = metric, x = "nrounds",
                               color = "max_depth", linetype = "eta", shape = "min_child_weight",
                               facet = "min_child_weight ~ subsample + colsample_bytree")
      # Lightgbm is skipped for MULTICLASS (presumably unsupported by the
      # lgbm wrapper here -- TODO confirm).
      if (TARGET_TYPE != "MULTICLASS") {
        # Lightgbm
        fit = train(x = df.tune[features],
                    y = df.tune$target,
                    #fit = train(sparse.model.matrix(as.formula(formula), df.tune[c("target",features)]), df.tune$target,
                    method = lgbm,
                    trControl = ctrl_idx_nopar_fff,
                    metric = metric,
                    tuneGrid = expand.grid(nrounds = seq(100,2100,200), learning_rate = c(0.01),
                                           num_leaves = 32, min_data_in_leaf = c(10),
                                           feature_fraction = c(0.7), bagging_fraction = c(0.7)),
                    #max_depth = 3, #use for small data
                    verbose = -1)
        plot(fit)
        hmsPM::plot_caret_result(fit, metric = metric, x = "nrounds",
                                 color = "num_leaves", linetype = "learning_rate",
                                 shape = "min_data_in_leaf", facet = " ~ bagging_fraction + feature_fraction")
      }
      # DeepLearning
      fit = train(form = as.formula(formula_notree),
                  data = df.tune[c("target",features_notree)],
                  method = deepLearn,
                  trControl = ctrl_idx_nopar_fff,
                  metric = metric,
                  tuneGrid = expand.grid(size = c("10","10-10"),
                                         lambda = c(0), dropout = 0.5,
                                         batch_size = c(100), lr = c(1e-3),
                                         batch_normalization = TRUE,
                                         activation = c("relu","elu"),
                                         epochs = 10),
                  preProc = c("center","scale"),
                  verbose = 0)
      plot(fit)
      # ==== Evaluate generalization gap ====----
      # Sample data (usually undersample training data)
      df.gengap = df.tune
      # Tune grid to loop over
      tunepar = expand.grid(nrounds = seq(100,3100,500), eta = c(0.01),
                            max_depth = c(3,6), min_child_weight = c(10),
                            colsample_bytree = c(0.7), subsample = c(0.7),
                            gamma = c(0), alpha = c(0), lambda = c(1))
      # Calc generalization gap
      df.gengap_result = hmsPM::calc_gengap(df_data = df.gengap,
                                            formula_string = formula,
                                            sparse = TRUE,
                                            method = xgb_custom,
                                            tune_grid = tunepar,
                                            cluster = cl)
      # Plot generalization gap
      (plots = hmsPM::plot_gengap(df_gengap = df.gengap_result,
                                  metric = metric,
                                  x = "nrounds",
                                  color = "max_depth",
                                  shape = "gamma",
                                  facet = "min_child_weight ~ alpha + lambda"))
      grobs = marrangeGrob(plots, ncol = 2, nrow = 1)
      ggsave(paste0(plotloc,TARGET_TYPE,"_generalization_gap.pdf"),
             grobs,
             width = 12, height = 8)
      # ==== Simulation: compare algorithms ====----
      # Basic data sampling
      df.sim = df.tune
      # Define methods to run in simulation
      l.xgb = list(method = xgb_custom,
                   formula_string = formula,
                   sparse = TRUE,
                   tune_grid = expand.grid(nrounds = 2100, eta = c(0.01),
                                           max_depth = c(3), min_child_weight = c(10),
                                           colsample_bytree = c(0.7), subsample = c(0.7),
                                           gamma = 0, alpha = 0, lambda = 1))
      l.glmnet = list(method = glmnet_custom,
                      formula_string = formula_binned,
                      sparse = TRUE,
                      tune_grid = expand.grid(alpha = 0,
                                              lambda = 2^(seq(4, -10, -2))))
      # Simulate
      df.sim_result = hmsPM::calc_simulation(df_data = df.sim,
                                             n_sim = 3,
                                             metric = metric,
                                             sample_frac_train = 0.8,
                                             sample_frac_test = 0.5,
                                             l_methods = list(xgb = l.xgb,
                                                              glmnet = l.glmnet))
      (plot = hmsPM::plot_simulation(df_simulation = df.sim_result,
                                     metric = metric))
      ggsave(paste0(plotloc,TARGET_TYPE,"_model_comparison.pdf"),
             plot,
             width = 12, height = 8)
      # ==== Learning curve for winner algorithm ====----
      # Basic data sampling (do NOT undersamle as this is done in calc_learningcurve;
      # in fact you finally should not sample at all)
      df.lc = df %>% sample_n(min(nrow(.),5e3))
      # Tunegrid
      tunepar = expand.grid(nrounds = seq(100,2100,500), eta = c(0.01),
                            max_depth = c(3), min_child_weight = c(10),
                            colsample_bytree = c(0.7), subsample = c(0.7),
                            gamma = 0, alpha = 0, lambda = 1)
      # Calc lc
      df.lc_result = hmsPM::calc_learningcurve(df_data = df.lc,
                                               formula_string = formula,
                                               sparse = TRUE,
                                               method = xgb_custom,
                                               tune_grid = tunepar,
                                               chunks_pct = c(seq(5,10,1), seq(20,100,10)),
                                               balanced = TRUE,
                                               metric = metric)
      (p = hmsPM::plot_learningcurve(df_lc = df.lc_result,
                                     metric = metric))
      ggsave(paste0(plotloc,TARGET_TYPE,"_learningCurve.pdf"),
             p,
             width = 8, height = 6)
    },
    # Keep the loop going over the remaining target types if one fails.
    error = function (e) {
      message("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!! ERROR in 2_modelcomparison.R for TARGET_TYPE '", TARGET_TYPE, "'")
    }
  )
}
| /code/hmsPM/example/code/2_modelcomparison.R | no_license | uwpz/AzureMLS | R | false | false | 14,946 | r | rm(list = ls())
# target types to be calculated
# -> REMOVE AND ADAPT AT APPROPRIATE LOCATIONS FOR A USE-CASE
TARGET_TYPES = c(
"CLASS",
"REGR",
"MULTICLASS"
)
for (TARGET_TYPE in TARGET_TYPES) {
cat(paste0("\n\n******************* Compare models with '", TARGET_TYPE, "' target. *******************\n\n"))
tryCatch(
{
#TARGET_TYPE = "CLASS"
#################################################################################################################-
#|||| Initialize ||||----
#################################################################################################################-
# Load result from exploration
load(paste0("data/",TARGET_TYPE,"_1_explore.rdata"))
# Load libraries and functions
source("code/0_init.R")
# Initialize parallel processing
closeAllConnections() #reset
Sys.getenv("NUMBER_OF_PROCESSORS")
cl = makeCluster(4)
registerDoParallel(cl)
# stopCluster(cl); closeAllConnections() #stop cluster
# Set metric for peformance comparison
metric = switch(TARGET_TYPE, "CLASS" = "AUC", "REGR" = "spearman", "MULTICLASS" = "AUC")
classProbs = switch(TARGET_TYPE, "CLASS" = TRUE, "REGR" = FALSE, "MULTICLASS" = TRUE)
#################################################################################################################-
#|||| Test an algorithm (and determine parameter grid) ||||----
#################################################################################################################-
# Sample data ----------------------------------------------------------------------------------------------------
if (TARGET_TYPE %in% c("CLASS","MULTICLASS")) {
# Sample from all data (take all but n_maxpersample at most)
#c(df.tune, b_sample, b_all) %<-% (df %>% hmsPM::undersample_n(n_max_per_level = 5e3))
# Undersample only training data
c(df.tmp, b_sample, b_all) %<-% (df %>% filter(fold == "train") %>% undersample_n(n_max_per_level = 5e3))
df.tune = bind_rows(df.tmp, df %>% filter(fold == "test"))
summary(df.tune$target); b_sample; b_all
}
if (TARGET_TYPE == "REGR") {
# Sample from all data
df.tune = df %>% sample_n(min(nrow(.),5e3))
}
# Define some controls -------------------------------------------------------------------------------------------
l.index = list(i = which(df.tune$fold == "train"))
#set.seed(998)
#l.index = list(i = sample(1:nrow(df.tune), floor(0.8*nrow(df.tune)))) #random sample
# Index based test-set
ctrl_idx = trainControl(method = "cv", number = 1, index = l.index,
returnResamp = "final", returnData = FALSE,
summaryFunction = hmsPM::performance_summary, classProbs = classProbs)
# Dito but "fast" final fit: DO NOT USE in case of further application!!!
ctrl_idx_fff = ctrl_idx
ctrl_idx_fff$indexFinal = sample(1:nrow(df.tune), 100) #"Fast" final fit!!!
# Dito but without parallel processing: Needed for DeepLearn or H2o
ctrl_idx_nopar_fff = ctrl_idx_fff
ctrl_idx_nopar_fff$allowParallel = FALSE
# FFF as 5-fold cv
ctrl_cv_fff = trainControl(method = "cv", number = 5,
returnResamp = "final", returnData = FALSE,
summaryFunction = hmsPM::performance_summary, classProbs = classProbs,
indexFinal = sample(1:nrow(df.tune), 100)) #"Fast" final fit!!!
# Fits -----------------------------------------------------------------------------------------------------------
## Overwritten Lasso / Elastic Net: Possible to use sparse matrix
fit = train(x = sparse.model.matrix(as.formula(formula_binned),
df.tune[c("target",features_binned)]),
y = df.tune$target,
method = glmnet_custom,
trControl = ctrl_idx_fff,
metric = metric,
tuneGrid = expand.grid(alpha = c(0,0.2,0.5,0.8,1),
lambda = 2^(seq(5, -15, -2)))
#weights = exposure, family = "poisson"
)
#preProc = c("center","scale")) #no scaling needed due to dummy coding of all variables
plot(fit)
plot(fit, xlim = c(0,1))
# -> keep alpha=1 to have a full Lasso
## Random Forest
fit = train(x = df.tune[features],
y = df.tune$target,
#fit = train(x = model.matrix(as.formula(formula), df.tune[c("target",features)]), y = df.tune$target,
method = "ranger",
trControl = ctrl_idx_fff,
metric = metric,
tuneGrid = expand.grid(mtry = seq(1,length(features),10),
splitrule = switch(TARGET_TYPE,
"CLASS" = "gini", "REGR" = "variance",
"MULTICLASS" = "gini") ,
min.node.size = c(1,5,10)),
num.trees = 500) #use the Dots (...) for explicitly specifiying randomForest parameter
plot(fit)
# -> keep around the recommended values: mtry(class) = sqrt(length(features), mtry(regr) = 0.3 * length(features))
## Boosted Trees
# Default xgbTree: no parallel processing possible with DMatrix (and using sparse matrix
# will result in nonsparse trafo)
# fit = train(x = xgb.DMatrix(sparse.model.matrix(as.formula(formula),
# df.tune[c("target",features)])),
# y = df.tune$target,
# method = "xgbTree",
# trControl = ctrl_idx_nopar_fff, #no parallel for DMatrix
# metric = metric,
# tuneGrid = expand.grid(nrounds = seq(100,1100,200), eta = c(0.01),
# max_depth = c(3), min_child_weight = c(10),
# colsample_bytree = c(0.7), subsample = c(0.7),
# gamma = 0))
# plot(fit)
# Overwritten xgbTree: additional alpha and lambda parameter. Possible to use sparse matrix
# and parallel processing
fit = train(x = sparse.model.matrix(as.formula(formula),
df.tune[c("target",features)]),
y = df.tune$target,
method = xgb_custom,
trControl = ctrl_idx_fff, #parallel for overwritten xgb
metric = metric,
tuneGrid = expand.grid(nrounds = seq(100,3100,200), eta = c(0.01),
max_depth = c(3), min_child_weight = c(10),
colsample_bytree = c(0.7), subsample = c(0.7),
gamma = 0, alpha = 0, lambda = 1))
plot(fit)
hmsPM::plot_caret_result(fit = fit, metric = metric, x = "nrounds",
color = "max_depth", linetype = "eta", shape = "min_child_weight",
facet = "min_child_weight ~ subsample + colsample_bytree")
if (TARGET_TYPE != "MULTICLASS") {
# Lightgbm
fit = train(x = df.tune[features],
y = df.tune$target,
#fit = train(sparse.model.matrix(as.formula(formula), df.tune[c("target",features)]), df.tune$target,
method = lgbm,
trControl = ctrl_idx_nopar_fff,
metric = metric,
tuneGrid = expand.grid(nrounds = seq(100,2100,200), learning_rate = c(0.01),
num_leaves = 32, min_data_in_leaf = c(10),
feature_fraction = c(0.7), bagging_fraction = c(0.7)),
#max_depth = 3, #use for small data
verbose = -1)
plot(fit)
hmsPM::plot_caret_result(fit, metric = metric, x = "nrounds",
color = "num_leaves", linetype = "learning_rate",
shape = "min_data_in_leaf", facet = " ~ bagging_fraction + feature_fraction")
}
# DeepLearning
fit = train(form = as.formula(formula_notree),
data = df.tune[c("target",features_notree)],
method = deepLearn,
trControl = ctrl_idx_nopar_fff,
metric = metric,
tuneGrid = expand.grid(size = c("10","10-10"),
lambda = c(0), dropout = 0.5,
batch_size = c(100), lr = c(1e-3),
batch_normalization = TRUE,
activation = c("relu","elu"),
epochs = 10),
preProc = c("center","scale"),
verbose = 0)
plot(fit)
#################################################################################################################-
#|||| Evaluate generalization gap ||||----
#################################################################################################################-
# Sample data (usually undersample training data)
df.gengap = df.tune
# Tune grid to loop over
tunepar = expand.grid(nrounds = seq(100,3100,500), eta = c(0.01),
max_depth = c(3,6), min_child_weight = c(10),
colsample_bytree = c(0.7), subsample = c(0.7),
gamma = c(0), alpha = c(0), lambda = c(1))
# Calc generalization gap
df.gengap_result = hmsPM::calc_gengap(df_data = df.gengap,
formula_string = formula,
sparse = TRUE,
method = xgb_custom,
tune_grid = tunepar,
cluster = cl)
# Plot generalization gap
(plots = hmsPM::plot_gengap(df_gengap = df.gengap_result,
metric = metric,
x = "nrounds",
color = "max_depth",
shape = "gamma",
facet = "min_child_weight ~ alpha + lambda"))
grobs = marrangeGrob(plots, ncol = 2, nrow = 1)
ggsave(paste0(plotloc,TARGET_TYPE,"_generalization_gap.pdf"),
grobs,
width = 12, height = 8)
#################################################################################################################-
#|||| Simulation: compare algorithms ||||----
#################################################################################################################-
# Basic data sampling
df.sim = df.tune
# Define methods to run in simulation
l.xgb = list(method = xgb_custom,
formula_string = formula,
sparse = TRUE,
tune_grid = expand.grid(nrounds = 2100, eta = c(0.01),
max_depth = c(3), min_child_weight = c(10),
colsample_bytree = c(0.7), subsample = c(0.7),
gamma = 0, alpha = 0, lambda = 1))
l.glmnet = list(method = glmnet_custom,
formula_string = formula_binned,
sparse = TRUE,
tune_grid = expand.grid(alpha = 0,
lambda = 2^(seq(4, -10, -2))))
# Simulate
df.sim_result = hmsPM::calc_simulation(df_data = df.sim,
n_sim = 3,
metric = metric,
sample_frac_train = 0.8,
sample_frac_test = 0.5,
l_methods = list(xgb = l.xgb,
glmnet = l.glmnet))
(plot = hmsPM::plot_simulation(df_simulation = df.sim_result,
metric = metric))
ggsave(paste0(plotloc,TARGET_TYPE,"_model_comparison.pdf"),
plot,
width = 12, height = 8)
#################################################################################################################-
#|||| Learning curve for winner algorithm ||||----
#################################################################################################################-
# Basic data sampling (do NOT undersamle as this is done in calc_learningcurve;
# in fact you finally should not sample at all)
df.lc = df %>% sample_n(min(nrow(.),5e3))
# Tunegrid
tunepar = expand.grid(nrounds = seq(100,2100,500), eta = c(0.01),
max_depth = c(3), min_child_weight = c(10),
colsample_bytree = c(0.7), subsample = c(0.7),
gamma = 0, alpha = 0, lambda = 1)
# Calc lc
df.lc_result = hmsPM::calc_learningcurve(df_data = df.lc,
formula_string = formula,
sparse = TRUE,
method = xgb_custom,
tune_grid = tunepar,
chunks_pct = c(seq(5,10,1), seq(20,100,10)),
balanced = TRUE,
metric = metric)
(p = hmsPM::plot_learningcurve(df_lc = df.lc_result,
metric = metric))
ggsave(paste0(plotloc,TARGET_TYPE,"_learningCurve.pdf"),
p,
width = 8, height = 6)
},
error = function (e) {
message("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!! ERROR in 2_modelcomparison.R for TARGET_TYPE '", TARGET_TYPE, "'")
}
)
}
|
## Mantel test
## analyses for McDaniel et al done with R version 3.5.3
## ade4 version 1.7-13
# FIX: install ade4 only when it is missing instead of reinstalling it
# unconditionally on every run of the script.
if (!requireNamespace("ade4", quietly = TRUE)) {
  install.packages("ade4")
}
library(ade4)
# Headerless square matrices: pairwise Fst (all loci, female- and
# male-linked loci) and pairwise geographic distances.
fst <- read.csv("data/fst.csv", header = FALSE)
fst_female <- read.csv("data/fst_female.csv", header = FALSE)
fst_male <- read.csv("data/fst_male.csv", header = FALSE)
dist <- read.csv("data/dist.csv", header = FALSE)
## all populations
genALL <- as.dist(fst)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
## just the bottom 5 populations
gen <- as.dist(fst[1:5, 1:5])
geo <- as.dist(dist[1:5, 1:5])
r1 <- mantel.rtest(geo, gen, nrepet = 999)
r1
plot(r1)
## using female sex-linked loci on all populations
genALL <- as.dist(fst_female)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
## using male sex-linked loci on all populations
genALL <- as.dist(fst_male)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
| /mantel.R | no_license | sarahcarey/Local_adaptation_Ceratodon | R | false | false | 922 | r |
## Mantel test
## analyses for McDaniel et al done with R version 3.5.3
## ade4 version 1.7-13
# FIX: install ade4 only when it is missing instead of reinstalling it
# unconditionally on every run of the script.
if (!requireNamespace("ade4", quietly = TRUE)) {
  install.packages("ade4")
}
library(ade4)
# Headerless square matrices: pairwise Fst (all loci, female- and
# male-linked loci) and pairwise geographic distances.
fst <- read.csv("data/fst.csv", header = FALSE)
fst_female <- read.csv("data/fst_female.csv", header = FALSE)
fst_male <- read.csv("data/fst_male.csv", header = FALSE)
dist <- read.csv("data/dist.csv", header = FALSE)
## all populations
genALL <- as.dist(fst)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
## just the bottom 5 populations
gen <- as.dist(fst[1:5, 1:5])
geo <- as.dist(dist[1:5, 1:5])
r1 <- mantel.rtest(geo, gen, nrepet = 999)
r1
plot(r1)
## using female sex-linked loci on all populations
genALL <- as.dist(fst_female)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
## using male sex-linked loci on all populations
genALL <- as.dist(fst_male)
geoALL <- as.dist(dist)
r1 <- mantel.rtest(geoALL, genALL, nrepet = 999)
r1
plot(r1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jonesDatum.R
\docType{class}
\name{jonesDatum-class}
\alias{jonesDatum-class}
\alias{jonesDatum}
\alias{plot,jonesDatum,character-method}
\alias{subset,jonesDatum-method}
\title{Jones Datum object}
\usage{
\S4method{plot}{jonesDatum,character}(x, y = c("raw", "adjusted",
"standardized", "background", "corrected"), xlab = "", ylab = "",
axes = FALSE, ...)
\S4method{subset}{jonesDatum}(x, subset, select, type = c("adjusted", "raw",
"standardized", "background", "corrected"), drop = FALSE, ...)
}
\arguments{
\item{x}{jonesDatum object, required}
\item{y}{char of: 'raw', 'adjusted', 'scaled', 'background', 'corrected'}
\item{xlab}{X-axis label (units will be appended)}
\item{ylab}{Y-axis label (units will be appended)}
\item{axes}{bool, draw with or without axes (FALSE by default)}
\item{...}{for plot: passed to plot.window. otherwise unused.}
\item{subset}{logical test for rows}
\item{select}{logical test for columns}
\item{type}{type=c('adjusted', 'standardized','raw','background','corrected')}
\item{drop}{sent to indexing operation for \code{x[,,drop]}}
}
\description{
Jones Datum object
plot
subset
}
\section{Fields}{
\describe{
\item{\code{fileName}}{Provide the full path to the file}
\item{\code{metaData}}{no input. contains metadata and info for file}
\item{\code{data}}{no input. list of data types: Raw, Adjusted, Rescaled and Time vector}
\item{\code{background}}{no input. contains, if applicable, lum, roi and area of background roi}
\item{\code{ares}}{no input. data.frame of cell# and corresponding roi area}
\item{\code{rois}}{no input. named list for each roi in the dataset}
\item{\code{image}}{no input. Contains the image data, if applicable.}
}}
\section{Methods}{
\describe{
\item{\code{getConfluence()}}{Report confluence as a percent (0-100).}
\item{\code{getData(type = c("adjusted", "standardized", "raw", "background",
"corrected"))}}{Extract full dataset of provided type.}
\item{\code{getDataAsList(type = c("adjusted", "standardized", "raw", "background",
"corected"))}}{Extract the data as a named list of X,Y datasets for each Cell.}
\item{\code{getIDs()}}{Get the names of the cell IDs}
\item{\code{getOtherTreatment()}}{Check the other treatments, typically IL-1b.}
\item{\code{getTreatment()}}{Report Treatment type, typically ET1.}
\item{\code{isConfluent(prcnt = 100)}}{Check if confluence is == \code{prcnt}.}
\item{\code{isET1()}}{Check if ET1 is in the primary treatment.}
\item{\code{isIL1b()}}{Check if IL1b is in other treatments.}
\item{\code{plotImage(channel = c("green", "all", "red", "blue"), ofst = 0,
greyscale = FALSE, equalize = TRUE, gamma = 2.2, colors = NULL,
texts = NULL, draw.roi = TRUE, roi.alpha = 0.5, draw.text = TRUE,
text.color = NULL, ...)}}{Plots the corresponding image if applicable. \cr
\code{channel}: Choose a specific channel or 'all' for all channels.\cr
\code{ofst}: Value to set unchosen channels on [0,1] can reduce contrast\cr
\code{greyscale}: boolean, convert image to greyscale (2d indexed)\cr
\code{equalize}: boolean, Use histogram equalization\cr
\code{gamma}: double, used if greyscale or hist equalization\cr
\code{colors}: hex color matrix for ROIs (defaults to all red)\cr
\code{draw.roi} boolean, Draw the ROIs over the image \cr
\code{roi.alpha} double, Transparency level for ROI on [0,1]\cr
\code{draw.text} boolean, Draw text labels, provided or otherwise, at the centroid of each ROI\cr
\code{text.color} array, Hex color vector. If scalar, color is applied to all text, otherwise
it must be the same length as .self$nCells().\cr
\code{...}: Parameters passed to \code{plot.window()}.}
\item{\code{save(pathName, objectName = "", ...)}}{Save the current object on the file in R external object format.}
}}
| /man/jonesDatum-class.Rd | permissive | Khlick/jonesDataClass | R | false | true | 3,876 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jonesDatum.R
\docType{class}
\name{jonesDatum-class}
\alias{jonesDatum-class}
\alias{jonesDatum}
\alias{plot,jonesDatum,character-method}
\alias{subset,jonesDatum-method}
\title{Jones Datum object}
\usage{
\S4method{plot}{jonesDatum,character}(x, y = c("raw", "adjusted",
"standardized", "background", "corrected"), xlab = "", ylab = "",
axes = FALSE, ...)
\S4method{subset}{jonesDatum}(x, subset, select, type = c("adjusted", "raw",
"standardized", "background", "corrected"), drop = FALSE, ...)
}
\arguments{
\item{x}{jonesDatum object, required}
\item{y}{char of: 'raw', 'adjusted', 'scaled', 'background', 'corrected'}
\item{xlab}{X-axis label (units will be appended)}
\item{ylab}{Y-axis label (units will be appended)}
\item{axes}{bool, draw with or without axes (FALSE by default)}
\item{...}{for plot: passed to plot.window. otherwise unused.}
\item{subset}{logical test for rows}
\item{select}{logical test for columns}
\item{type}{type=c('adjusted', 'standardized','raw','background','corrected')}
\item{drop}{sent to indexing operation for \code{x[,,drop]}}
}
\description{
Jones Datum object
plot
subset
}
\section{Fields}{
\describe{
\item{\code{fileName}}{Provide the full path to the file}
\item{\code{metaData}}{no input. contains metadata and info for file}
\item{\code{data}}{no input. list of data types: Raw, Adjusted, Rescaled and Time vector}
\item{\code{background}}{no input. contains, if applicable, lum, roi and area of background roi}
\item{\code{ares}}{no input. data.frame of cell# and corresponding roi area}
\item{\code{rois}}{no input. named list for each roi in the dataset}
\item{\code{image}}{no input. Contains the image data, if applicable.}
}}
\section{Methods}{
\describe{
\item{\code{getConfluence()}}{Report confluence as a percent (0-100).}
\item{\code{getData(type = c("adjusted", "standardized", "raw", "background",
"corrected"))}}{Extract full dataset of provided type.}
\item{\code{getDataAsList(type = c("adjusted", "standardized", "raw", "background",
"corected"))}}{Extract the data as a named list of X,Y datasets for each Cell.}
\item{\code{getIDs()}}{Get the names of the cell IDs}
\item{\code{getOtherTreatment()}}{Check the other treatments, typically IL-1b.}
\item{\code{getTreatment()}}{Report Treatment type, typically ET1.}
\item{\code{isConfluent(prcnt = 100)}}{Check if confluence is == \code{prcnt}.}
\item{\code{isET1()}}{Check if ET1 is in the primary treatment.}
\item{\code{isIL1b()}}{Check if IL1b is in other treatments.}
\item{\code{plotImage(channel = c("green", "all", "red", "blue"), ofst = 0,
greyscale = FALSE, equalize = TRUE, gamma = 2.2, colors = NULL,
texts = NULL, draw.roi = TRUE, roi.alpha = 0.5, draw.text = TRUE,
text.color = NULL, ...)}}{Plots the corresponding image if applicable. \cr
\code{channel}: Choose a specific channel or 'all' for all channels.\cr
\code{ofst}: Value to set unchosen channels on [0,1] can reduce contrast\cr
\code{greyscale}: boolean, convert image to greyscale (2d indexed)\cr
\code{equalize}: boolean, Use histogram equalization\cr
\code{gamma}: double, used if greyscale or hist equalization\cr
\code{colors}: hex color matrix for ROIs (defaults to all red)\cr
\code{draw.roi} boolean, Draw the ROIs over the image \cr
\code{roi.alpha} double, Transparency level for ROI on [0,1]\cr
\code{draw.text} boolean, Draw text labels, provided or otheriwse, at the centroid of each ROI\cr
\code{text.color} array, Hex color vector. If scalar, color is applied to all text, otherwise
it must be the same length as .self$nCells().\cr
\code{...}: Parameters passed to \code{plot.window()}.}
\item{\code{save(pathName, objectName = "", ...)}}{Save the current object on the file in R external object format.}
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53resolver_service.R
\name{route53resolver}
\alias{route53resolver}
\title{Amazon Route 53 Resolver}
\usage{
route53resolver(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
When you create a VPC using Amazon VPC, you automatically get DNS
resolution within the VPC from Route 53 Resolver. By default, Resolver
answers DNS queries for VPC domain names such as domain names for EC2
instances or ELB load balancers. Resolver performs recursive lookups
against public name servers for all other domain names.
You can also configure DNS resolution between your VPC and your network
over a Direct Connect or VPN connection:
\strong{Forward DNS queries from resolvers on your network to Route 53
Resolver}
DNS resolvers on your network can forward DNS queries to Resolver in a
specified VPC. This allows your DNS resolvers to easily resolve domain
names for AWS resources such as EC2 instances or records in a Route 53
private hosted zone. For more information, see \href{https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-network-to-vpc}{How DNS Resolvers on Your Network Forward DNS Queries to Route 53 Resolver}
in the \emph{Amazon Route 53 Developer Guide}.
\strong{Conditionally forward queries from a VPC to resolvers on your
network}
You can configure Resolver to forward queries that it receives from EC2
instances in your VPCs to DNS resolvers on your network. To forward
selected queries, you create Resolver rules that specify the domain
names for the DNS queries that you want to forward (such as
example.com), and the IP addresses of the DNS resolvers on your network
that you want to forward the queries to. If a query matches multiple
rules (example.com, acme.example.com), Resolver chooses the rule with
the most specific match (acme.example.com) and forwards the query to the
IP addresses that you specified in that rule. For more information, see
\href{https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-vpc-to-network}{How Route 53 Resolver Forwards DNS Queries from Your VPCs to Your Network}
in the \emph{Amazon Route 53 Developer Guide}.
Like Amazon VPC, Resolver is regional. In each region where you have
VPCs, you can choose whether to forward queries from your VPCs to your
network (outbound queries), from your network to your VPCs (inbound
queries), or both.
}
\section{Service syntax}{
\preformatted{svc <- route53resolver(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=route53resolver_associate_resolver_endpoint_ip_address]{associate_resolver_endpoint_ip_address} \tab Adds IP addresses to an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_associate_resolver_query_log_config]{associate_resolver_query_log_config} \tab Associates an Amazon VPC with a specified query logging configuration\cr
\link[=route53resolver_associate_resolver_rule]{associate_resolver_rule} \tab Associates a Resolver rule with a VPC\cr
\link[=route53resolver_create_resolver_endpoint]{create_resolver_endpoint} \tab Creates a Resolver endpoint\cr
\link[=route53resolver_create_resolver_query_log_config]{create_resolver_query_log_config} \tab Creates a Resolver query logging configuration, which defines where you want Resolver to save DNS query logs that originate in your VPCs\cr
\link[=route53resolver_create_resolver_rule]{create_resolver_rule} \tab For DNS queries that originate in your VPCs, specifies which Resolver endpoint the queries pass through, one domain name that you want to forward to your network, and the IP addresses of the DNS resolvers in your network\cr
\link[=route53resolver_delete_resolver_endpoint]{delete_resolver_endpoint} \tab Deletes a Resolver endpoint\cr
\link[=route53resolver_delete_resolver_query_log_config]{delete_resolver_query_log_config} \tab Deletes a query logging configuration\cr
\link[=route53resolver_delete_resolver_rule]{delete_resolver_rule} \tab Deletes a Resolver rule\cr
\link[=route53resolver_disassociate_resolver_endpoint_ip_address]{disassociate_resolver_endpoint_ip_address} \tab Removes IP addresses from an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_disassociate_resolver_query_log_config]{disassociate_resolver_query_log_config} \tab Disassociates a VPC from a query logging configuration\cr
\link[=route53resolver_disassociate_resolver_rule]{disassociate_resolver_rule} \tab Removes the association between a specified Resolver rule and a specified VPC\cr
\link[=route53resolver_get_resolver_dnssec_config]{get_resolver_dnssec_config} \tab Gets DNSSEC validation information for a specified resource\cr
\link[=route53resolver_get_resolver_endpoint]{get_resolver_endpoint} \tab Gets information about a specified Resolver endpoint, such as whether it's an inbound or an outbound Resolver endpoint, and the current status of the endpoint\cr
\link[=route53resolver_get_resolver_query_log_config]{get_resolver_query_log_config} \tab Gets information about a specified Resolver query logging configuration, such as the number of VPCs that the configuration is logging queries for and the location that logs are sent to\cr
\link[=route53resolver_get_resolver_query_log_config_association]{get_resolver_query_log_config_association} \tab Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC\cr
\link[=route53resolver_get_resolver_query_log_config_policy]{get_resolver_query_log_config_policy} \tab Gets information about a query logging policy\cr
\link[=route53resolver_get_resolver_rule]{get_resolver_rule} \tab Gets information about a specified Resolver rule, such as the domain name that the rule forwards DNS queries for and the ID of the outbound Resolver endpoint that the rule is associated with\cr
\link[=route53resolver_get_resolver_rule_association]{get_resolver_rule_association} \tab Gets information about an association between a specified Resolver rule and a VPC\cr
\link[=route53resolver_get_resolver_rule_policy]{get_resolver_rule_policy} \tab Gets information about the Resolver rule policy for a specified rule\cr
\link[=route53resolver_list_resolver_dnssec_configs]{list_resolver_dnssec_configs} \tab Lists the configurations for DNSSEC validation that are associated with the current AWS account\cr
\link[=route53resolver_list_resolver_endpoint_ip_addresses]{list_resolver_endpoint_ip_addresses} \tab Gets the IP addresses for a specified Resolver endpoint\cr
\link[=route53resolver_list_resolver_endpoints]{list_resolver_endpoints} \tab Lists all the Resolver endpoints that were created using the current AWS account\cr
\link[=route53resolver_list_resolver_query_log_config_associations]{list_resolver_query_log_config_associations} \tab Lists information about associations between Amazon VPCs and query logging configurations\cr
\link[=route53resolver_list_resolver_query_log_configs]{list_resolver_query_log_configs} \tab Lists information about the specified query logging configurations\cr
\link[=route53resolver_list_resolver_rule_associations]{list_resolver_rule_associations} \tab Lists the associations that were created between Resolver rules and VPCs using the current AWS account\cr
\link[=route53resolver_list_resolver_rules]{list_resolver_rules} \tab Lists the Resolver rules that were created using the current AWS account\cr
\link[=route53resolver_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags that you associated with the specified resource\cr
\link[=route53resolver_put_resolver_query_log_config_policy]{put_resolver_query_log_config_policy} \tab Specifies an AWS account that you want to share a query logging configuration with, the query logging configuration that you want to share, and the operations that you want the account to be able to perform on the configuration\cr
\link[=route53resolver_put_resolver_rule_policy]{put_resolver_rule_policy} \tab Specifies an AWS rule that you want to share with another account, the account that you want to share the rule with, and the operations that you want the account to be able to perform on the rule\cr
\link[=route53resolver_tag_resource]{tag_resource} \tab Adds one or more tags to a specified resource\cr
\link[=route53resolver_untag_resource]{untag_resource} \tab Removes one or more tags from a specified resource\cr
\link[=route53resolver_update_resolver_dnssec_config]{update_resolver_dnssec_config} \tab Updates an existing DNSSEC validation configuration\cr
\link[=route53resolver_update_resolver_endpoint]{update_resolver_endpoint} \tab Updates the name of an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_update_resolver_rule]{update_resolver_rule} \tab Updates settings for a specified Resolver rule
}
}
\examples{
\dontrun{
svc <- route53resolver()
svc$associate_resolver_endpoint_ip_address(
Foo = 123
)
}
}
| /cran/paws.networking/man/route53resolver.Rd | permissive | TWarczak/paws | R | false | true | 9,248 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53resolver_service.R
\name{route53resolver}
\alias{route53resolver}
\title{Amazon Route 53 Resolver}
\usage{
route53resolver(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
When you create a VPC using Amazon VPC, you automatically get DNS
resolution within the VPC from Route 53 Resolver. By default, Resolver
answers DNS queries for VPC domain names such as domain names for EC2
instances or ELB load balancers. Resolver performs recursive lookups
against public name servers for all other domain names.
You can also configure DNS resolution between your VPC and your network
over a Direct Connect or VPN connection:
\strong{Forward DNS queries from resolvers on your network to Route 53
Resolver}
DNS resolvers on your network can forward DNS queries to Resolver in a
specified VPC. This allows your DNS resolvers to easily resolve domain
names for AWS resources such as EC2 instances or records in a Route 53
private hosted zone. For more information, see \href{https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-network-to-vpc}{How DNS Resolvers on Your Network Forward DNS Queries to Route 53 Resolver}
in the \emph{Amazon Route 53 Developer Guide}.
\strong{Conditionally forward queries from a VPC to resolvers on your
network}
You can configure Resolver to forward queries that it receives from EC2
instances in your VPCs to DNS resolvers on your network. To forward
selected queries, you create Resolver rules that specify the domain
names for the DNS queries that you want to forward (such as
example.com), and the IP addresses of the DNS resolvers on your network
that you want to forward the queries to. If a query matches multiple
rules (example.com, acme.example.com), Resolver chooses the rule with
the most specific match (acme.example.com) and forwards the query to the
IP addresses that you specified in that rule. For more information, see
\href{https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-vpc-to-network}{How Route 53 Resolver Forwards DNS Queries from Your VPCs to Your Network}
in the \emph{Amazon Route 53 Developer Guide}.
Like Amazon VPC, Resolver is regional. In each region where you have
VPCs, you can choose whether to forward queries from your VPCs to your
network (outbound queries), from your network to your VPCs (inbound
queries), or both.
}
\section{Service syntax}{
\preformatted{svc <- route53resolver(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=route53resolver_associate_resolver_endpoint_ip_address]{associate_resolver_endpoint_ip_address} \tab Adds IP addresses to an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_associate_resolver_query_log_config]{associate_resolver_query_log_config} \tab Associates an Amazon VPC with a specified query logging configuration\cr
\link[=route53resolver_associate_resolver_rule]{associate_resolver_rule} \tab Associates a Resolver rule with a VPC\cr
\link[=route53resolver_create_resolver_endpoint]{create_resolver_endpoint} \tab Creates a Resolver endpoint\cr
\link[=route53resolver_create_resolver_query_log_config]{create_resolver_query_log_config} \tab Creates a Resolver query logging configuration, which defines where you want Resolver to save DNS query logs that originate in your VPCs\cr
\link[=route53resolver_create_resolver_rule]{create_resolver_rule} \tab For DNS queries that originate in your VPCs, specifies which Resolver endpoint the queries pass through, one domain name that you want to forward to your network, and the IP addresses of the DNS resolvers in your network\cr
\link[=route53resolver_delete_resolver_endpoint]{delete_resolver_endpoint} \tab Deletes a Resolver endpoint\cr
\link[=route53resolver_delete_resolver_query_log_config]{delete_resolver_query_log_config} \tab Deletes a query logging configuration\cr
\link[=route53resolver_delete_resolver_rule]{delete_resolver_rule} \tab Deletes a Resolver rule\cr
\link[=route53resolver_disassociate_resolver_endpoint_ip_address]{disassociate_resolver_endpoint_ip_address} \tab Removes IP addresses from an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_disassociate_resolver_query_log_config]{disassociate_resolver_query_log_config} \tab Disassociates a VPC from a query logging configuration\cr
\link[=route53resolver_disassociate_resolver_rule]{disassociate_resolver_rule} \tab Removes the association between a specified Resolver rule and a specified VPC\cr
\link[=route53resolver_get_resolver_dnssec_config]{get_resolver_dnssec_config} \tab Gets DNSSEC validation information for a specified resource\cr
\link[=route53resolver_get_resolver_endpoint]{get_resolver_endpoint} \tab Gets information about a specified Resolver endpoint, such as whether it's an inbound or an outbound Resolver endpoint, and the current status of the endpoint\cr
\link[=route53resolver_get_resolver_query_log_config]{get_resolver_query_log_config} \tab Gets information about a specified Resolver query logging configuration, such as the number of VPCs that the configuration is logging queries for and the location that logs are sent to\cr
\link[=route53resolver_get_resolver_query_log_config_association]{get_resolver_query_log_config_association} \tab Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC\cr
\link[=route53resolver_get_resolver_query_log_config_policy]{get_resolver_query_log_config_policy} \tab Gets information about a query logging policy\cr
\link[=route53resolver_get_resolver_rule]{get_resolver_rule} \tab Gets information about a specified Resolver rule, such as the domain name that the rule forwards DNS queries for and the ID of the outbound Resolver endpoint that the rule is associated with\cr
\link[=route53resolver_get_resolver_rule_association]{get_resolver_rule_association} \tab Gets information about an association between a specified Resolver rule and a VPC\cr
\link[=route53resolver_get_resolver_rule_policy]{get_resolver_rule_policy} \tab Gets information about the Resolver rule policy for a specified rule\cr
\link[=route53resolver_list_resolver_dnssec_configs]{list_resolver_dnssec_configs} \tab Lists the configurations for DNSSEC validation that are associated with the current AWS account\cr
\link[=route53resolver_list_resolver_endpoint_ip_addresses]{list_resolver_endpoint_ip_addresses} \tab Gets the IP addresses for a specified Resolver endpoint\cr
\link[=route53resolver_list_resolver_endpoints]{list_resolver_endpoints} \tab Lists all the Resolver endpoints that were created using the current AWS account\cr
\link[=route53resolver_list_resolver_query_log_config_associations]{list_resolver_query_log_config_associations} \tab Lists information about associations between Amazon VPCs and query logging configurations\cr
\link[=route53resolver_list_resolver_query_log_configs]{list_resolver_query_log_configs} \tab Lists information about the specified query logging configurations\cr
\link[=route53resolver_list_resolver_rule_associations]{list_resolver_rule_associations} \tab Lists the associations that were created between Resolver rules and VPCs using the current AWS account\cr
\link[=route53resolver_list_resolver_rules]{list_resolver_rules} \tab Lists the Resolver rules that were created using the current AWS account\cr
\link[=route53resolver_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags that you associated with the specified resource\cr
\link[=route53resolver_put_resolver_query_log_config_policy]{put_resolver_query_log_config_policy} \tab Specifies an AWS account that you want to share a query logging configuration with, the query logging configuration that you want to share, and the operations that you want the account to be able to perform on the configuration\cr
\link[=route53resolver_put_resolver_rule_policy]{put_resolver_rule_policy} \tab Specifies an AWS rule that you want to share with another account, the account that you want to share the rule with, and the operations that you want the account to be able to perform on the rule\cr
\link[=route53resolver_tag_resource]{tag_resource} \tab Adds one or more tags to a specified resource\cr
\link[=route53resolver_untag_resource]{untag_resource} \tab Removes one or more tags from a specified resource\cr
\link[=route53resolver_update_resolver_dnssec_config]{update_resolver_dnssec_config} \tab Updates an existing DNSSEC validation configuration\cr
\link[=route53resolver_update_resolver_endpoint]{update_resolver_endpoint} \tab Updates the name of an inbound or an outbound Resolver endpoint\cr
\link[=route53resolver_update_resolver_rule]{update_resolver_rule} \tab Updates settings for a specified Resolver rule
}
}
\examples{
\dontrun{
svc <- route53resolver()
svc$associate_resolver_endpoint_ip_address(
Foo = 123
)
}
}
|
# Load the 'ape' package for phylogenetic tree I/O and manipulation.
library(ape)
# Read the rooted tree for this locus from a Newick-format file.
testtree <- read.tree("10609_1.txt")
# Remove the root to obtain an unrooted topology
# (presumably required by the downstream codeml analysis -- TODO confirm).
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10609_1_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10609_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the rooted tree for this locus from a Newick-format file
# (read.tree/unroot/write.tree come from the 'ape' package loaded above).
testtree <- read.tree("10609_1.txt")
# Remove the root to obtain an unrooted topology.
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format.
write.tree(unrooted_tr, file="10609_1_unrooted.txt")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.nissan}
\alias{dasl.nissan}
\title{Nissan}
\format{100 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/nissan/?sf_paged=28}{Nissan}
}
\description{
Richard DeVeaux owned a Nissan Maxima for 8 years. He recorded the car’s fuel efficiency (in mpg) each time he filled the tank. He wanted to know what fuel efficiency to expect as “ordinary” for his car. Knowing this, he was able to predict when he’d need to fill the tank again and to notice if the fuel efficiency suddenly got worse, which could be a sign of trouble.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
Deveaux Data
}
\concept{Data Display}
\concept{Normal Probability Plots}
\concept{Normal model}
| /man/dasl.nissan.Rd | no_license | sigbertklinke/mmstat.data | R | false | true | 892 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.nissan}
\alias{dasl.nissan}
\title{Nissan}
\format{100 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/nissan/?sf_paged=28}{Nissan}
}
\description{
Richard DeVeaux owned a Nissan Maxima for 8 years. He recorded the car’s fuel efficiency (in mpg) each time he filled the tank. He wanted to know what fuel efficiency to expect as “ordinary” for his car. Knowing this, he was able to predict when he’d need to fill the tank again and to notice if the fuel efficiency suddenly got worse, which could be a sign of trouble.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
Deveaux Data
}
\concept{Data Display}
\concept{Normal Probability Plots}
\concept{Normal model}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/py.R
\name{py}
\alias{py}
\title{Convert strings of Chinese characters into Pinyin.}
\usage{
py(char = "", sep = "_", other_replace = NULL, dic = pydic())
}
\arguments{
\item{char}{a string vector}
\item{sep}{character. Separation between the converted pinyin.}
\item{other_replace}{NULL or character. Define how to convert non-Chinese characters in \code{char}. NULL means 'let it be'.}
\item{dic}{the preloaded pinyin library using the \code{pylib()} function.}
}
\value{
pinyin of the given Chinese string.
}
\description{
Convert strings of Chinese characters into Pinyin.
}
\examples{
py(dic = NA)
}
| /man/py.Rd | permissive | pzhaonet/pinyin | R | false | true | 681 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/py.R
\name{py}
\alias{py}
\title{Convert strings of Chinese characters into Pinyin.}
\usage{
py(char = "", sep = "_", other_replace = NULL, dic = pydic())
}
\arguments{
\item{char}{a string vector}
\item{sep}{character. Separation between the converted pinyin.}
\item{other_replace}{NULL or character. Define how to convert non-Chinese characters in \code{char}. NULL means 'let it be'.}
\item{dic}{the preloaded pinyin library using the \code{pylib()} function.}
}
\value{
pinyin of the given Chinese string.
}
\description{
Convert strings of Chinese characters into Pinyin.
}
\examples{
py(dic = NA)
}
|
#' @title S3 HTTP Requests
#'
#' @description This is the workhorse function for executing API requests for S3.
#'
#' @details This is mostly an internal function for executing API requests.
#' In almost all cases, users do not need to access this directly.
#'
#' @param verb A character string containing an HTTP verb, defaulting to \dQuote{GET}.
#' @param bucket A character string with the name of the bucket, or an object of class \dQuote{s3_bucket}. If the latter and a region can be inferred from the bucket object attributes, then that region is used instead of \code{region}.
#' @param path A character string with the name of the object to put in the bucket
#' (sometimes called the object or 'key name' in the AWS documentation.)
#' @param query any queries, passed as a named list
#' @param headers a list of request headers for the REST call.
#' @param request_body character string of request body data.
#' @param accelerate A logical indicating whether to use AWS transfer acceleration, which can produce significant speed improvements for cross-country transfers. Acceleration only works with buckets that do not have dots in bucket name.
#' @param region A character string containing the AWS region. Ignored if region can be inferred from \code{bucket}.
#' If missing, defaults to \dQuote{us-east-1}.
#' @param key A character string containing an AWS Access Key ID.
#' If missing, defaults to value stored in environment variable \dQuote{AWS_ACCESS_KEY_ID}.
#' @param secret A character string containing an AWS Secret Access Key.
#' If missing, defaults to value stored in environment variable \dQuote{AWS_SECRET_ACCESS_KEY}.
#' @param parse_response return the response as is, or parse and return as a list?
#' default is TRUE.
#' @param ... Additional arguments passed to an HTTP request function.
#' such as \code{\link[httr]{GET}}.
#'
#' @return the S3 response, or the relevant error.
#'
#' @importFrom httr GET POST PUT HEAD DELETE VERB upload_file parse_url add_headers
#' @importFrom httr http_error http_status warn_for_status content headers
#' @importFrom xml2 read_xml as_list
#' @import aws.signature
#' @export
s3HTTP <- function(verb = "GET",
                   bucket = "",
                   path = "",
                   query = NULL,
                   headers = list(),
                   request_body = "",
                   accelerate = FALSE,
                   region = Sys.getenv("AWS_DEFAULT_REGION", "us-east-1"),
                   key = Sys.getenv("AWS_ACCESS_KEY_ID"),
                   secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
                   parse_response = TRUE,
                   ...) {
    # A region carried by the bucket object overrides the `region` argument;
    # an empty region string falls back to the default "us-east-1".
    bucketname <- get_bucketname(bucket)
    bucketregion <- get_region(bucket)
    if (!is.null(bucketregion)) {
        region <- bucketregion
    }
    if (region == "") {
        region <- "us-east-1"
    }
    # Build the endpoint URL, then decompose it so its host/path/query parts
    # can feed into the canonical request for signing.
    url <- setup_s3_url(bucketname, region, path, accelerate)
    p <- parse_url(url)
    # AWS Signature V4 requires the timestamp in UTC, ISO-8601 "basic" form.
    current <- Sys.time()
    d_timestamp <- format(current, "%Y%m%dT%H%M%SZ", tz = "UTC")
    action <- if (p$path == "") "/" else paste0("/", p$path)
    # Headers that participate in the canonical request for the signature.
    canonical_headers <- c(list(host = p$hostname,
                                `x-amz-date` = d_timestamp), headers)
    # If no explicit query was given, fall back to any query string embedded
    # in the URL; an all-NULL query list collapses to NULL so httr omits the
    # query string entirely.
    if (is.null(query) && !is.null(p$query)) {
        query <- p$query
    }
    if (all(sapply(query, is.null))) {
        query <- NULL
    }
    if (key == "") {
        # No access key available: send an unsigned (anonymous) request.
        headers$`x-amz-date` <- d_timestamp
        Sig <- list()
        H <- do.call(add_headers, headers)
    } else {
        # Compute the AWS Signature Version 4 authorization material and
        # attach the date, payload hash, and Authorization headers.
        Sig <- aws.signature::signature_v4_auth(
                 datetime = d_timestamp,
                 region = region,
                 service = "s3",
                 verb = verb,
                 action = action,
                 query_args = query,
                 canonical_headers = canonical_headers,
                 request_body = request_body,
                 key = key, secret = secret)
        headers$`x-amz-date` <- d_timestamp
        headers$`x-amz-content-sha256` <- Sig$BodyHash
        headers$Authorization <- Sig$SignatureHeader
        H <- do.call(add_headers, headers)
    }
    # Dispatch on the HTTP verb. HEAD and DELETE short-circuit: they return
    # a logical success flag with the response headers attached as
    # attributes, skipping the response parsing below.
    if (verb == "GET") {
      r <- GET(url, H, query = query, ...)
    } else if (verb == "HEAD") {
      r <- HEAD(url, H, query = query, ...)
      s <- http_status(r)
      if (tolower(s$category) == "success") {
        out <- TRUE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      } else {
        message(s$message)
        out <- FALSE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      }
    } else if (verb == "DELETE") {
      r <- DELETE(url, H, query = query, ...)
      s <- http_status(r)
      if (tolower(s$category) == "success") {
        out <- TRUE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      } else {
        message(s$message)
        out <- FALSE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      }
    } else if (verb == "POST") {
      r <- POST(url, H, query = query, ...)
    } else if (verb == "PUT") {
      # Empty body -> bare PUT; a string naming an existing file is streamed
      # from disk; anything else is sent as the raw request body.
      if (is.character(request_body) && request_body == "") {
        r <- PUT(url, H, query = query, ...)
      } else if (is.character(request_body) && file.exists(request_body)) {
        r <- PUT(url, H, body = upload_file(request_body), query = query, ...)
      } else {
        r <- PUT(url, H, body = request_body, query = query, ...)
      }
    } else if (verb == "OPTIONS") {
      r <- VERB("OPTIONS", url, H, query = query, ...)
    }
    # Optionally parse the raw httr response into an R object, then expose
    # the response headers as attributes on the return value.
    if (parse_response) {
      out <- parse_aws_s3_response(r, Sig)
    } else {
      out <- r
    }
    attributes(out) <- c(attributes(out), headers(r))
    out
}
# Convert an httr response from S3 into an R object.
#
# XML payloads are parsed and flattened into a list; any other content type
# is passed through untouched. If the HTTP status indicates an error, the
# (possibly parsed) payload is wrapped in an "aws_error" structure carrying
# the response headers and the signing details of the request, and the
# status warning is raised via warn_for_status().
parse_aws_s3_response <- function(r, Sig, verbose = getOption("verbose")){
  content_type <- headers(r)$"content-type"
  is_xml <- is.null(content_type) || content_type == "application/xml"
  if (!is_xml) {
    parsed <- r
  } else {
    body_text <- content(r, as = "text", encoding = "UTF-8")
    parsed <- if (body_text == "") NULL else flatten_list(as_list(read_xml(body_text)))
  }
  if (!http_error(r)) {
    return(parsed)
  }
  # Error path: raise the status warning and tag the result as an aws_error
  # with the request's signing details for debugging.
  warn_for_status(r)
  out <- structure(parsed, headers = headers(r), class = "aws_error")
  attr(out, "request_canonical") <- Sig$CanonicalRequest
  attr(out, "request_string_to_sign") <- Sig$StringToSign
  attr(out, "request_signature") <- Sig$SignatureHeader
  out
}
# Construct the full S3 endpoint URL for a request.
#
# @param bucketname Bucket name; "" targets the service-level endpoint.
# @param region AWS region string (e.g. "us-east-1").
# @param path Object key or API path. A leading "/" is kept as-is;
#   otherwise a "/" separator is inserted between host and path.
# @param accelerate Use the S3 transfer-acceleration endpoint. Only valid
#   for bucket names without dots (a DNS/TLS constraint of the accelerate
#   hostname), otherwise an error is thrown.
# @return A character string containing the https URL.
setup_s3_url <- function(bucketname, region, path, accelerate) {
    if (bucketname == "") {
        # Service-level endpoint: us-east-1 uses the legacy global hostname.
        if (region == "us-east-1") {
            url <- "https://s3.amazonaws.com"
        } else {
            url <- paste0("https://s3-", region, ".amazonaws.com")
        }
    } else if (accelerate) {
        if (grepl(".", bucketname, fixed = TRUE)) {
            stop("To use accelerate, bucket name must not contain dots (.)")
        }
        url <- paste0("https://", bucketname, ".s3-accelerate.amazonaws.com")
    } else {
        # Virtual-hosted-style endpoint; us-east-1 again uses the legacy
        # regionless hostname.
        if (region == "us-east-1") {
            url <- paste0("https://", bucketname, ".s3.amazonaws.com")
        } else {
            url <- paste0("https://", bucketname, ".s3-", region, ".amazonaws.com")
        }
    }
    # Join host and path without doubling the "/" separator.
    if (grepl("^/", path)) {
        url <- paste0(url, path)
    } else {
        url <- paste(url, path, sep = "/")
    }
    return(url)
}
| /R/s3HTTP.R | no_license | russellpierce/aws.s3 | R | false | false | 7,560 | r | #' @title S3 HTTP Requests
#'
#' @description This is the workhorse function for executing API requests for S3.
#'
#' @details This is mostly an internal function for executing API requests.
#' In almost all cases, users do not need to access this directly.
#'
#' @param verb A character string containing an HTTP verb, defaulting to \dQuote{GET}.
#' @param bucket A character string with the name of the bucket, or an object of class \dQuote{s3_bucket}. If the latter and a region can be inferred from the bucket object attributes, then that region is used instead of \code{region}.
#' @param path A character string with the name of the object to put in the bucket
#' (sometimes called the object or 'key name' in the AWS documentation.)
#' @param query any queries, passed as a named list
#' @param headers a list of request headers for the REST call.
#' @param request_body character string of request body data.
#' @param accelerate A logical indicating whether to use AWS transfer acceleration, which can produce significant speed improvements for cross-country transfers. Acceleration only works with buckets that do not have dots in bucket name.
#' @param region A character string containing the AWS region. Ignored if region can be inferred from \code{bucket}.
#' If missing, defaults to \dQuote{us-east-1}.
#' @param key A character string containing an AWS Access Key ID.
#' If missing, defaults to value stored in environment variable \dQuote{AWS_ACCESS_KEY_ID}.
#' @param secret A character string containing an AWS Secret Access Key.
#' If missing, defaults to value stored in environment variable \dQuote{AWS_SECRET_ACCESS_KEY}.
#' @param parse_response return the response as is, or parse and return as a list?
#' default is TRUE.
#' @param ... Additional arguments passed to an HTTP request function.
#' such as \code{\link[httr]{GET}}.
#'
#' @return the S3 response, or the relevant error.
#'
#' @importFrom httr GET POST PUT HEAD DELETE VERB upload_file parse_url add_headers
#' @importFrom httr http_error http_status warn_for_status content headers
#' @importFrom xml2 read_xml as_list
#' @import aws.signature
#' @export
s3HTTP <- function(verb = "GET",
                   bucket = "",
                   path = "",
                   query = NULL,
                   headers = list(),
                   request_body = "",
                   accelerate = FALSE,
                   region = Sys.getenv("AWS_DEFAULT_REGION", "us-east-1"),
                   key = Sys.getenv("AWS_ACCESS_KEY_ID"),
                   secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
                   parse_response = TRUE,
                   ...) {
    # A region carried by the bucket object overrides the `region` argument;
    # an empty region string falls back to the default "us-east-1".
    bucketname <- get_bucketname(bucket)
    bucketregion <- get_region(bucket)
    if (!is.null(bucketregion)) {
        region <- bucketregion
    }
    if (region == "") {
        region <- "us-east-1"
    }
    # Build the endpoint URL, then decompose it so its host/path/query parts
    # can feed into the canonical request for signing.
    url <- setup_s3_url(bucketname, region, path, accelerate)
    p <- parse_url(url)
    # AWS Signature V4 requires the timestamp in UTC, ISO-8601 "basic" form.
    current <- Sys.time()
    d_timestamp <- format(current, "%Y%m%dT%H%M%SZ", tz = "UTC")
    action <- if (p$path == "") "/" else paste0("/", p$path)
    # Headers that participate in the canonical request for the signature.
    canonical_headers <- c(list(host = p$hostname,
                                `x-amz-date` = d_timestamp), headers)
    # If no explicit query was given, fall back to any query string embedded
    # in the URL; an all-NULL query list collapses to NULL so httr omits the
    # query string entirely.
    if (is.null(query) && !is.null(p$query)) {
        query <- p$query
    }
    if (all(sapply(query, is.null))) {
        query <- NULL
    }
    if (key == "") {
        # No access key available: send an unsigned (anonymous) request.
        headers$`x-amz-date` <- d_timestamp
        Sig <- list()
        H <- do.call(add_headers, headers)
    } else {
        # Compute the AWS Signature Version 4 authorization material and
        # attach the date, payload hash, and Authorization headers.
        Sig <- aws.signature::signature_v4_auth(
                 datetime = d_timestamp,
                 region = region,
                 service = "s3",
                 verb = verb,
                 action = action,
                 query_args = query,
                 canonical_headers = canonical_headers,
                 request_body = request_body,
                 key = key, secret = secret)
        headers$`x-amz-date` <- d_timestamp
        headers$`x-amz-content-sha256` <- Sig$BodyHash
        headers$Authorization <- Sig$SignatureHeader
        H <- do.call(add_headers, headers)
    }
    # Dispatch on the HTTP verb. HEAD and DELETE short-circuit: they return
    # a logical success flag with the response headers attached as
    # attributes, skipping the response parsing below.
    if (verb == "GET") {
      r <- GET(url, H, query = query, ...)
    } else if (verb == "HEAD") {
      r <- HEAD(url, H, query = query, ...)
      s <- http_status(r)
      if (tolower(s$category) == "success") {
        out <- TRUE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      } else {
        message(s$message)
        out <- FALSE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      }
    } else if (verb == "DELETE") {
      r <- DELETE(url, H, query = query, ...)
      s <- http_status(r)
      if (tolower(s$category) == "success") {
        out <- TRUE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      } else {
        message(s$message)
        out <- FALSE
        attributes(out) <- c(attributes(out), headers(r))
        return(out)
      }
    } else if (verb == "POST") {
      r <- POST(url, H, query = query, ...)
    } else if (verb == "PUT") {
      # Empty body -> bare PUT; a string naming an existing file is streamed
      # from disk; anything else is sent as the raw request body.
      if (is.character(request_body) && request_body == "") {
        r <- PUT(url, H, query = query, ...)
      } else if (is.character(request_body) && file.exists(request_body)) {
        r <- PUT(url, H, body = upload_file(request_body), query = query, ...)
      } else {
        r <- PUT(url, H, body = request_body, query = query, ...)
      }
    } else if (verb == "OPTIONS") {
      r <- VERB("OPTIONS", url, H, query = query, ...)
    }
    # Optionally parse the raw httr response into an R object, then expose
    # the response headers as attributes on the return value.
    if (parse_response) {
      out <- parse_aws_s3_response(r, Sig)
    } else {
      out <- r
    }
    attributes(out) <- c(attributes(out), headers(r))
    out
}
# Convert an httr response from S3 into an R object.
#
# XML payloads are parsed and flattened into a list; any other content type
# is passed through untouched. If the HTTP status indicates an error, the
# (possibly parsed) payload is wrapped in an "aws_error" structure carrying
# the response headers and the signing details of the request, and the
# status warning is raised via warn_for_status().
parse_aws_s3_response <- function(r, Sig, verbose = getOption("verbose")){
  content_type <- headers(r)$"content-type"
  is_xml <- is.null(content_type) || content_type == "application/xml"
  if (!is_xml) {
    parsed <- r
  } else {
    body_text <- content(r, as = "text", encoding = "UTF-8")
    parsed <- if (body_text == "") NULL else flatten_list(as_list(read_xml(body_text)))
  }
  if (!http_error(r)) {
    return(parsed)
  }
  # Error path: raise the status warning and tag the result as an aws_error
  # with the request's signing details for debugging.
  warn_for_status(r)
  out <- structure(parsed, headers = headers(r), class = "aws_error")
  attr(out, "request_canonical") <- Sig$CanonicalRequest
  attr(out, "request_string_to_sign") <- Sig$StringToSign
  attr(out, "request_signature") <- Sig$SignatureHeader
  out
}
# Construct the full S3 endpoint URL for a request.
#
# @param bucketname Bucket name; "" targets the service-level endpoint.
# @param region AWS region string (e.g. "us-east-1").
# @param path Object key or API path. A leading "/" is kept as-is;
#   otherwise a "/" separator is inserted between host and path.
# @param accelerate Use the S3 transfer-acceleration endpoint. Only valid
#   for bucket names without dots (a DNS/TLS constraint of the accelerate
#   hostname), otherwise an error is thrown.
# @return A character string containing the https URL.
setup_s3_url <- function(bucketname, region, path, accelerate) {
    if (bucketname == "") {
        # Service-level endpoint: us-east-1 uses the legacy global hostname.
        if (region == "us-east-1") {
            url <- "https://s3.amazonaws.com"
        } else {
            url <- paste0("https://s3-", region, ".amazonaws.com")
        }
    } else if (accelerate) {
        if (grepl(".", bucketname, fixed = TRUE)) {
            stop("To use accelerate, bucket name must not contain dots (.)")
        }
        url <- paste0("https://", bucketname, ".s3-accelerate.amazonaws.com")
    } else {
        # Virtual-hosted-style endpoint; us-east-1 again uses the legacy
        # regionless hostname.
        if (region == "us-east-1") {
            url <- paste0("https://", bucketname, ".s3.amazonaws.com")
        } else {
            url <- paste0("https://", bucketname, ".s3-", region, ".amazonaws.com")
        }
    }
    # Join host and path without doubling the "/" separator.
    if (grepl("^/", path)) {
        url <- paste0(url, path)
    } else {
        url <- paste(url, path, sep = "/")
    }
    return(url)
}
|
# Tidies the raw syllabification measurements: keeps only the critical
# triphthong items, classifies each production (Triphthong / Hiatus /
# Simplification), derives consonant and glide predictors from the item
# name, and writes the tidy CSV.
source(here::here("./scripts/r/00_helpers.R"))
# Load syllabification data ----------------------------------------------------
syl_df <- read_csv(here("data", "dataframes", "raw", "./syllable_raw.csv"))
# Tidy data --------------------------------------------------------------------
#
# labID coding:
# - extra = hiato (hiatus)
# - error = simplification or wrong vowel
# - NA = triphthong
# Get critical items (nonce words of the form laka + C + {i,u} + aisto)
critical_items_triphthongs <- c(
  #    [j]            [w]
  "lakabiaisto", "lakabuaisto", # [b]
  "lakadiaisto", "lakaduaisto", # [d]
  "lakafiaisto", "lakafuaisto", # [f]
  "lakagiaisto", "lakaguaisto", # [g]
  "lakakiaisto", "lakakuaisto", # [k]
  "lakapiaisto", "lakapuaisto", # [p]
  "lakatiaisto", "lakatuaisto" # [t]
)
# Target third syllables containing the full triphthong (C + i/u + ais)
critical_syllables <- c(
  #  [j]      [w]
  "biais", "buais", # [b]
  "diais", "duais", # [d]
  "fiais", "fuais", # [f]
  "giais", "guais", # [g]
  "kiais", "kuais", # [k]
  "piais", "puais", # [p]
  "tiais", "tuais" # [t]
)
# Create appropriate columns from file names
# Remove extraneous columns
# Filter to keep critical items
# Create 'response' column with three possible responses:
#  - Hiatus
#  - Triphthong
#  - Simplification
# if_else series to fill 'response' column; NA responses (labID missing and
# syllable not critical) default to 'Simplification'
syl_tidy <- syl_df %>%
  separate(., col = prefix,
           into = c('participant', 'exp', 'task', 'item', 'status')) %>%
  select(., -ends_with('Dur'), -critOnsetLab) %>%
  filter(., item %in% critical_items_triphthongs) %>%
  mutate(., itemRepeat = item,
         response = if_else(syll3Lab %in% critical_syllables, 'Triphthong',
                    if_else(!(syll3Lab %in% critical_syllables) &
                            labID == 'extra', 'Hiatus', 'Simplification')),
         response = if_else(is.na(response), 'Simplification', response),
         item = as.factor(item),
         response = as.factor(response)) %>%
  # sep = c(4, 6): positions 1-4 = "laka", 5-6 = consonant + glide letter,
  # remainder discarded. NOTE(review): `remove = T` uses the reassignable
  # shorthand; prefer TRUE.
  separate(., col = itemRepeat, into = c('fluff1', 'glide', 'fluff2'),
           remove = T, sep = c(4, 6)) %>%
  select(-c(fluff1, fluff2)) %>%
  # Split the two-character chunk into preceding consonant and glide vowel.
  separate(., col = glide, into = c('pre_c', 'glide'), sep = 1, remove = T) %>%
  # Derive voicing and place of articulation of the pre-glide consonant.
  mutate(., pre_c_voicing = if_else(pre_c %in% c('b', 'd', 'g'), 'voiced', 'voiceless'),
         pre_c_poa = if_else(pre_c %in% c('b', 'p'), 'bilabial',
                     if_else(pre_c %in% c('d', 't'), 'dental',
                     if_else(pre_c == 'f', 'labiodental', 'velar')))) %>%
  select(., participant:status,
         critical_syllable = syll3Lab,
         pre_c,
         pre_c_voicing,
         pre_c_poa,
         glide,
         response) %>%
  write_csv(., file = here("data", "dataframes", "tidy", "syllabified_triphthong_tidy.csv"))
| /scripts/r/02a_tidy_syllabified_data.R | no_license | jvcasillas/glide_affiliation | R | false | false | 2,752 | r | source(here::here("./scripts/r/00_helpers.R"))
# Load syllabification data ----------------------------------------------------
syl_df <- read_csv(here("data", "dataframes", "raw", "./syllable_raw.csv"))
# Tidy data --------------------------------------------------------------------
#
# labID coding:
# - extra = hiato (hiatus)
# - error = simplification or wrong vowel
# - NA = triphthong
# Get critical items (nonce words of the form laka + C + {i,u} + aisto)
critical_items_triphthongs <- c(
  #    [j]            [w]
  "lakabiaisto", "lakabuaisto", # [b]
  "lakadiaisto", "lakaduaisto", # [d]
  "lakafiaisto", "lakafuaisto", # [f]
  "lakagiaisto", "lakaguaisto", # [g]
  "lakakiaisto", "lakakuaisto", # [k]
  "lakapiaisto", "lakapuaisto", # [p]
  "lakatiaisto", "lakatuaisto" # [t]
)
# Target third syllables containing the full triphthong (C + i/u + ais)
critical_syllables <- c(
  #  [j]      [w]
  "biais", "buais", # [b]
  "diais", "duais", # [d]
  "fiais", "fuais", # [f]
  "giais", "guais", # [g]
  "kiais", "kuais", # [k]
  "piais", "puais", # [p]
  "tiais", "tuais" # [t]
)
# Create appropriate columns from file names
# Remove extraneous columns
# Filter to keep critical items
# Create 'response' column with three possible responses:
#  - Hiatus
#  - Triphthong
#  - Simplification
# if_else series to fill 'response' column; NA responses (labID missing and
# syllable not critical) default to 'Simplification'
syl_tidy <- syl_df %>%
  separate(., col = prefix,
           into = c('participant', 'exp', 'task', 'item', 'status')) %>%
  select(., -ends_with('Dur'), -critOnsetLab) %>%
  filter(., item %in% critical_items_triphthongs) %>%
  mutate(., itemRepeat = item,
         response = if_else(syll3Lab %in% critical_syllables, 'Triphthong',
                    if_else(!(syll3Lab %in% critical_syllables) &
                            labID == 'extra', 'Hiatus', 'Simplification')),
         response = if_else(is.na(response), 'Simplification', response),
         item = as.factor(item),
         response = as.factor(response)) %>%
  # sep = c(4, 6): positions 1-4 = "laka", 5-6 = consonant + glide letter,
  # remainder discarded. NOTE(review): `remove = T` uses the reassignable
  # shorthand; prefer TRUE.
  separate(., col = itemRepeat, into = c('fluff1', 'glide', 'fluff2'),
           remove = T, sep = c(4, 6)) %>%
  select(-c(fluff1, fluff2)) %>%
  # Split the two-character chunk into preceding consonant and glide vowel.
  separate(., col = glide, into = c('pre_c', 'glide'), sep = 1, remove = T) %>%
  # Derive voicing and place of articulation of the pre-glide consonant.
  mutate(., pre_c_voicing = if_else(pre_c %in% c('b', 'd', 'g'), 'voiced', 'voiceless'),
         pre_c_poa = if_else(pre_c %in% c('b', 'p'), 'bilabial',
                     if_else(pre_c %in% c('d', 't'), 'dental',
                     if_else(pre_c == 'f', 'labiodental', 'velar')))) %>%
  select(., participant:status,
         critical_syllable = syll3Lab,
         pre_c,
         pre_c_voicing,
         pre_c_poa,
         glide,
         response) %>%
  write_csv(., file = here("data", "dataframes", "tidy", "syllabified_triphthong_tidy.csv"))
|
# Database 2
pacman::p_load(purrr, stringr, dplyr, tidyr, ggplot2, lubridate)
# Load into the environment the text of the daily PDFs (previously obtained via web scraping)
load("extdata/base_de_datos_2_raw.RData")
# 2. Using the worldwide information, build a table and display a plot of the monthly data.
# Extract the text that holds the information for the WHO regions
regions_info <- map_depth(pdf_files_2, 2, pluck, 1)
# Extract the data for each continent.
# NOTE(review): the hard-coded map_at() values below appear to be manual
# corrections transcribed from the PDFs — verify against the source reports.
# Americas
america <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "1[3-6],([:digit:]{3},?){2}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_if(~ length(.x) == 2, pluck, 1) %>%
  # Manually fill in the missing numbers
  map_at(24, ~ "15,872,421") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Europe
europe <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "(?<=\\n|\\s)[4-5],([:digit:]{3},?){2}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_at(c(1, 7), pluck, 1) %>%
  map_if(~ length(.x) > 1, pluck, 2) %>%
  # Manually correct specific numbers
  map_at(9, ~ "4,600,967") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# South-East Asia
southeastern_asia <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "(?<=\\s)[4-6],([:digit:]{3},?){2}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_at(7, pluck, 2) %>%
  map_if(~ length(.x) > 1, pluck, 1) %>%
  # Manually correct specific numbers
  map_at(24, ~ "6,436,394") %>%
  map_at(28, ~ "6,810,494") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Africa
africa <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "(?<=\\n)1,([:digit:]{3},?){2}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_at(c(2, 4, 23), pluck, 1) %>%
  map_at(5:6, pluck, 2) %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Eastern Mediterranean
eastern_mediterranean <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "[1-2],([:digit:]{3},?){2}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_at(c(1:2, 17, 18, 28), pluck, 1) %>%
  map_at(c(22:27, 30), pluck, 2) %>%
  map_at(3, pluck, 3) %>%
  map_if(~ length(.x) == 2, pluck, 1) %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Western Pacific
western_pacific <-
  # Extract the number matching the value pattern observed at the start and end of the month
  map(regions_info, str_extract_all, pattern = "(?<=\\n)[4-6][:digit:]{2},[:digit:]{3}") %>%
  # Drop numbers that cannot be valid results
  map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated numbers
  map(unique) %>%
  # Manually pick specific numbers
  map_if(~ length(.x) == 2, pluck, 1) %>%
  # Manually fill in the missing numbers
  map_at(3, ~ "505,156") %>%
  map_at(11, ~ "535,413") %>%
  map_at(14, ~ "550,664") %>%
  map_at(16, ~ "560,287") %>%
  map_at(19, ~ "573,120") %>%
  map_at(20, ~ "577,905") %>%
  map_at(26, ~ "598,060") %>%
  map_at(27, ~ "600,891") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Store the results in a table (column names are user-facing, kept in Spanish)
worldwide_info <- as_tibble(
  list(
    "Días" = seq(from = ymd('2020-09-01'), to = ymd('2020-09-30'), by = 'days'),
    "América" = america,
    "Asia Sudoriental" = southeastern_asia,
    "Europa" = europe,
    "Mediterráneo Oriental" = eastern_mediterranean,
    "África" = africa,
    "Pacífico Occidental" = western_pacific
  )
)
worldwide_info
# Plot to visualize the monthly information (one line per WHO region)
worldwide_info %>%
  pivot_longer(-1, names_to = "Regiones de la OMS", values_to = "cases") %>%
  ggplot(aes(x = `Días`, y = cases, color = `Regiones de la OMS`)) +
  geom_line() +
  scale_colour_manual(values = c("green", "red2", "purple", "blue2", "green4", "yellow3")) +
  labs(y = "Número de casos", title = "Casos acumulados de COVID-19 por SARS-CoV-2 por regiones de la OMS\n(Septiembre 2020)")
# 3. Using the positive-death information, build a table and display a plot of the monthly data.
# Extract the text that holds the information per federal entity
entities_info <- map_depth(pdf_files_2, 2, pluck, 2) %>%
  map(str_extract_all, pattern = "([:alpha:]+\\s){1,3}\\| [:digit:]+")
### Finished states ###
## Chihuahua, Chiapas, Campeche and Michoacan ##
# NOTE(review): character classes like "[C|c]" also match a literal "|";
# harmless here but "[Cc]" is the intended form.
ch <- entities_info %>%
  # Keep the values containing "CH"
  map_depth(2, str_subset, pattern = "[C|c][H|h]") %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated values
  map(unique)
# Chihuahua
chihuahua <- ch %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "CHIHU", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(c(21, 25), pluck, 1) %>%
  # Manually fill in specific numbers
  map_at(1, ~ "1147") %>%
  map_at(2, ~ "1159") %>%
  map_at(3, ~ "1169") %>%
  map_at(4, ~ "1177") %>%
  map_at(5, ~ "1187") %>%
  map_at(6, ~ "1198") %>%
  map_at(8, ~ "1211") %>%
  map_at(11, ~ "1233") %>%
  map_at(13, ~ "1240") %>%
  map_at(14, ~ "1241") %>%
  map_at(17, ~ "1258") %>%
  map_at(22, ~ "1291") %>%
  map_at(29, ~ "1371") %>%
  map_at(30, ~ "1382") %>%
  # Manually correct specific numbers
  map_at(24, ~ "1318") %>%
  map_at(25, ~ "1336") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Chiapas
chiapas <- ch %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "CHIAPAS", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]+") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(3, pluck, 1) %>%
  map_at(c(21, 23, 25), pluck, 2) %>%
  map_at(c(22, 24), pluck, 3) %>%
  # Manually fill in specific numbers
  map_at(1, ~ "1002") %>%
  map_at(c(5, 8, 9, 11), ~ "1009") %>%
  map_at(16, ~ "1016") %>%
  map_at(26, ~ "1018") %>%
  map_at(27, ~ "1019") %>%
  map_at(29:30, ~ "1020") %>%
  # Manually correct specific numbers
  map_at(19:20, ~ "1016") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Michoacan
michoacan <- ch %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "MICH", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(24:25, pluck, 1) %>%
  # Manually fill in specific numbers
  map_at(1, ~ "1187") %>%
  map_at(2, ~ "1227") %>%
  map_at(4, ~ "1259") %>%
  map_at(5, ~ "1277") %>%
  map_at(6, ~ "1281") %>%
  map_at(7, ~ "1288") %>%
  map_at(8, ~ "1303") %>%
  map_at(9, ~ "1328") %>%
  map_at(12, ~ "1380") %>%
  map_at(14, ~ "1399") %>%
  map_at(17, ~ "1452") %>%
  map_at(18, ~ "1468") %>%
  map_at(19, ~ "1480") %>%
  map_at(21, ~ "1489") %>%
  map_at(22, ~ "1517") %>%
  map_at(27, ~ "1588") %>%
  # Manually correct specific numbers
  map_at(10, ~ "1345") %>%
  map_at(16, ~ "1441") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
## Sonora, Sinaloa and San Luis Potosi ##
sn <- entities_info %>%
  # Keep the values starting with "S"
  map_depth(2, str_subset, pattern = "^[S|s]") %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated values
  map(unique)
# Sonora
sonora <- sn %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "SONO", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(2, pluck, 1) %>%
  map_at(7, pluck, 2) %>%
  # Manually fill in specific numbers
  # (index 5 is overwritten again by the correction further down)
  map_at(5, ~ "1277") %>%
  map_at(9, ~ "2771") %>%
  map_at(11, ~ "2794") %>%
  map_at(13, ~ "2804") %>%
  map_at(14, ~ "2806") %>%
  map_at(17, ~ "2814") %>%
  map_at(23, ~ "2868") %>%
  map_at(25, ~ "2879") %>%
  map_at(26, ~ "2883") %>%
  map_at(27, ~ "2884") %>%
  map_at(28, ~ "2886") %>%
  map_at(29, ~ "2897") %>%
  map_at(30, ~ "2899") %>%
  # Manually correct specific numbers
  map_at(5, ~ "2693") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# Sinaloa
sinaloa <- sn %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "SINA", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(c(3:4, 11, 13), pluck, 1) %>%
  # Manually fill in specific numbers
  map_at(5, ~ "2868") %>%
  map_at(7, ~ "2879") %>%
  map_at(9, ~ "2914") %>%
  map_at(10, ~ "2949") %>%
  map_at(15, ~ "2993") %>%
  map_at(17, ~ "3004") %>%
  map_at(25, ~ "3119") %>%
  map_at(26, ~ "3128") %>%
  map_at(27, ~ "3142") %>%
  map_at(28, ~ "3150") %>%
  # Manually correct specific numbers
  map_at(12, ~ "2969") %>%
  map_at(20, ~ "3052") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
# San Luis Potosi
san_luis_potosi <- sn %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "SANL", value = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually fill in specific numbers
  map_at(1, ~ "1190") %>%
  map_at(2, ~ "1211") %>%
  map_at(4, ~ "1243") %>%
  map_at(6, ~ "1297") %>%
  map_at(7, ~ "1298") %>%
  map_at(8, ~ "1319") %>%
  map_at(9, ~ "1355") %>%
  map_at(12, ~ "1421") %>%
  map_at(13, ~ "1427") %>%
  map_at(14, ~ "1431") %>%
  map_at(16, ~ "1457") %>%
  map_at(17, ~ "1463") %>%
  map_at(21, ~ "1542") %>%
  map_at(24, ~ "1624") %>%
  map_at(29, ~ "1681") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
## Mexico City and State of Mexico ##
mex <- entities_info %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "MEXICO", value = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated values
  map(unique)
# Mexico City
ciudad_de_mexico <- mex %>%
  # Keep the values starting with "C"
  map_depth(2, str_subset, pattern = "^C") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 5-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{5}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually fill in specific numbers
  map_at(4, ~ "10780") %>%
  map_at(6, ~ "10869") %>%
  map_at(7, ~ "10900") %>%
  map_at(8, ~ "10986") %>%
  map_at(9, ~ "11043") %>%
  map_at(10, ~ "11103") %>%
  map_at(11, ~ "11146") %>%
  map_at(12, ~ "11199") %>%
  map_at(13, ~ "11224") %>%
  map_at(15, ~ "11318") %>%
  map_at(16, ~ "11351") %>%
  map_at(17, ~ "11403") %>%
  map_at(18, ~ "11491") %>%
  map_at(19, ~ "11545") %>%
  map_at(20, ~ "11571") %>%
  map_at(24, ~ "11814") %>%
  map_at(25, ~ "11894") %>%
  map_at(26, ~ "11926") %>%
  map_at(27, ~ "11962") %>%
  map_at(28, ~ "11996") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
## Baja California and Baja California Sur ##
bc <- entities_info %>%
  # Approximate text search with fuzzy matching (Levenshtein edit distance)
  map_depth(2, agrep, pattern = "CALIFORNIA", value = TRUE) %>%
  # Collapse the results into a single vector per day
  transpose() %>%
  map_depth(1, flatten_chr) %>%
  # Remove duplicated values
  map(unique)
# Baja California
baja_california <- bc %>%
  # Keep the entries that are NOT Baja California Sur
  map_depth(2, str_subset, pattern = "[S|s][U|u][R|r]", negate = TRUE) %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Extract the 4-digit numbers from the strings
  map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
  # Collapse the results into a single vector per day
  map_depth(1, flatten_chr) %>%
  # Remove duplicated results
  map(unique) %>%
  # Manually pick specific numbers
  map_at(12, pluck, 1) %>%
  # Manually fill in specific numbers
  map_at(2, ~ "3174") %>%
  map_at(5, ~ "3223") %>%
  map_at(8, ~ "3249") %>%
  map_at(17, ~ "3334") %>%
  map_at(20, ~ "3374") %>%
  map_at(22, ~ "3411") %>%
  map_at(23, ~ "3434") %>%
  map_at(24, ~ "3450") %>%
  map_at(26, ~ "3478") %>%
  # Convert the list into an integer vector
  flatten_chr() %>%
  str_remove_all(pattern = ",") %>%
  as.integer()
### Estados no terminados ###
## Quintana Roo y Queretaro ##
qr <- entities_info %>%
# Elegir los valores que comiencen con "QU"
map_depth(2, str_subset, pattern = "^[Q|q][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Tabasco y Tamaulipas ##
ta <- entities_info %>%
# Elegir los valores que comiencen con "TA"
map_depth(2, str_subset, pattern = "^[T|t][A|a]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Guanajuato y Guerrero ##
gu <- entities_info %>%
# Elegir los valores que comiencen con "GU"
map_depth(2, str_subset, pattern = "^[G|g][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Colima y Coahuila ##
co <- entities_info %>%
# Elegir los valores que comiencen con "CO"
map_depth(2, str_subset, pattern = "^[C|c][O|o]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Tlaxcala y Oaxaca ##
xla <- entities_info %>%
# Elegir los valores que contengan "XA o XC"
map_depth(2, str_subset, pattern = "[X|x]([A|a]|[C|c])") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Veracruz
ver <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "VERACRUZ", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Puebla
pue <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "PUEBLA", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Jalisco
ja <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "JALISCO", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Nuevo Leon
nl <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "LEON", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Hidalgo
hi <- entities_info %>%
# Elegir los valores que comiencen con "H"
map_depth(2, str_subset, pattern = "^[H|h]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Yucatan
yu <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "YUC", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Morelos
mo <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "MORELOS", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Nayarit
na <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "NAYAR", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Zacatecas
za <- entities_info %>%
# Elegir los valores que contengan "ZA"
map_depth(2, str_subset, pattern = "[Z|z][A|a]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Durango
du <- entities_info %>%
# Elegir los valores que contengan "DU"
map_depth(2, str_subset, pattern = "[D|d][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Aguascalientes
ag <- entities_info %>%
# Elegir los valores que comiencen con "AG"
map_depth(2, str_subset, pattern = "^[A|a][G|g]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
### Presentation of the results ###
# Collect the per-state daily positive-death counts into one tibble:
# one row per day of September 2020, one column per federal entity.
positive_deaths_info <- as_tibble(
  list(
    "Días" = seq(from = ymd('2020-09-01'), to = ymd('2020-09-30'), by = 'days'),
    "Baja California" = baja_california,
    "Chiapas" = chiapas,
    "Chihuahua" = chihuahua,
    "Ciudad de México" = ciudad_de_mexico,
    "Michoacán" = michoacan,
    "San Luis Potosí" = san_luis_potosi,
    "Sinaloa" = sinaloa,
    "Sonora" = sonora
  )
)
# Line chart of the monthly evolution: pivot to long format so that each
# federal entity becomes its own colored series.
positive_deaths_info %>%
  pivot_longer(-1, names_to = "Entidades federativas", values_to = "cases") %>%
  ggplot(aes(x = `Días`, y = cases, color = `Entidades federativas`)) +
  geom_line() +
  labs(y = "Defunciones positivas", title = "Defunciones positivas a COVID-19 por entidad federativa\n(Septiembre 2020)")
# Persist both result tables so the Shiny app can load them later.
save(worldwide_info, positive_deaths_info, file = "app/base_de_datos_2.RData")
| /data-raw/base_de_datos_2.R | permissive | data-and-code/archivos_pdf | R | false | false | 23,348 | r | # Base de datos 2
pacman::p_load(purrr, stringr, dplyr, tidyr, ggplot2, lubridate)
# Importar al entorno el texto de los PDFs diarios (obtenidos previamente mediante web scraping)
load("extdata/base_de_datos_2_raw.RData")
# 2. Con la información proporcionada a nivel mundial, cree un tabla y muestre un gráfico que visualice la información mensual.
# Extraer el texto del que se obtiene la informacion por regiones de la OMS
regions_info <- map_depth(pdf_files_2, 2, pluck, 1)
# Extraer los datos de cada continente
# America
america <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "1[3-6],([:digit:]{3},?){2}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_if(~ length(.x) == 2, pluck, 1) %>%
# Completar manualmente los numero faltantes
map_at(24, ~ "15,872,421") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Europa
europe <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "(?<=\\n|\\s)[4-5],([:digit:]{3},?){2}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(c(1, 7), pluck, 1) %>%
map_if(~ length(.x) > 1, pluck, 2) %>%
# Corregir manualmente ciertos numeros
map_at(9, ~ "4,600,967") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Asia Sudoriental
southeastern_asia <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "(?<=\\s)[4-6],([:digit:]{3},?){2}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(7, pluck, 2) %>%
map_if(~ length(.x) > 1, pluck, 1) %>%
# Corregir manualmente ciertos numeros
map_at(24, ~ "6,436,394") %>%
map_at(28, ~ "6,810,494") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# África
africa <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "(?<=\\n)1,([:digit:]{3},?){2}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(c(2, 4, 23), pluck, 1) %>%
map_at(5:6, pluck, 2) %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Mediterráneo Oriental
eastern_mediterranean <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "[1-2],([:digit:]{3},?){2}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(c(1:2, 17, 18, 28), pluck, 1) %>%
map_at(c(22:27, 30), pluck, 2) %>%
map_at(3, pluck, 3) %>%
map_if(~ length(.x) == 2, pluck, 1) %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Pacífico Occidental
western_pacific <-
# Extraer el numero de acuerdo con la secuencia de valores detectada al principio y final del mes
map(regions_info, str_extract_all, pattern = "(?<=\\n)[4-6][:digit:]{2},[:digit:]{3}") %>%
# Eliminar los numeros que no corresponden a posibles resultados
map_depth(2, str_subset, pattern = "0{3}", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los numeros repetidos
map(unique) %>%
# Elegir manualmente ciertos numeros
map_if(~ length(.x) == 2, pluck, 1) %>%
# Completar manualmente los numero faltantes
map_at(3, ~ "505,156") %>%
map_at(11, ~ "535,413") %>%
map_at(14, ~ "550,664") %>%
map_at(16, ~ "560,287") %>%
map_at(19, ~ "573,120") %>%
map_at(20, ~ "577,905") %>%
map_at(26, ~ "598,060") %>%
map_at(27, ~ "600,891") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Collect the per-region cumulative case counts into one tibble:
# one row per day of September 2020, one column per WHO region.
worldwide_info <- as_tibble(
  list(
    "Días" = seq(from = ymd('2020-09-01'), to = ymd('2020-09-30'), by = 'days'),
    "América" = america,
    "Asia Sudoriental" = southeastern_asia,
    "Europa" = europe,
    "Mediterráneo Oriental" = eastern_mediterranean,
    "África" = africa,
    "Pacífico Occidental" = western_pacific
  )
)
worldwide_info
# Line chart of the monthly evolution: pivot to long format so that each
# WHO region becomes its own colored series.
worldwide_info %>%
  pivot_longer(-1, names_to = "Regiones de la OMS", values_to = "cases") %>%
  ggplot(aes(x = `Días`, y = cases, color = `Regiones de la OMS`)) +
  geom_line() +
  scale_colour_manual(values = c("green", "red2", "purple", "blue2", "green4", "yellow3")) +
  labs(y = "Número de casos", title = "Casos acumulados de COVID-19 por SARS-CoV-2 por regiones de la OMS\n(Septiembre 2020)")
# 3. Con la información de defunciones positivas, cree un tabla y muestre un gráfico que visualice la información mensual.
# Extraer el texto del que se obtiene la informacion por entidad federativa
entities_info <- map_depth(pdf_files_2, 2, pluck, 2) %>%
map(str_extract_all, pattern = "([:alpha:]+\\s){1,3}\\| [:digit:]+")
### Estados terminados ###
## Chihuahua, Chiapas, Campeche y Michoacan ##
ch <- entities_info %>%
# Elegir los valores que contengan "CH"
map_depth(2, str_subset, pattern = "[C|c][H|h]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Chihuahua
chihuahua <- ch %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "CHIHU", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(c(21, 25), pluck, 1) %>%
# Completar manualmente ciertos numeros
map_at(1, ~ "1147") %>%
map_at(2, ~ "1159") %>%
map_at(3, ~ "1169") %>%
map_at(4, ~ "1177") %>%
map_at(5, ~ "1187") %>%
map_at(6, ~ "1198") %>%
map_at(8, ~ "1211") %>%
map_at(11, ~ "1233") %>%
map_at(13, ~ "1240") %>%
map_at(14, ~ "1241") %>%
map_at(17, ~ "1258") %>%
map_at(22, ~ "1291") %>%
map_at(29, ~ "1371") %>%
map_at(30, ~ "1382") %>%
# Corregir manualmente ciertos numeros
map_at(24, ~ "1318") %>%
map_at(25, ~ "1336") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Chiapas
chiapas <- ch %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "CHIAPAS", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]+") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(3, pluck, 1) %>%
map_at(c(21, 23, 25), pluck, 2) %>%
map_at(c(22, 24), pluck, 3) %>%
# Completar manualmente ciertos numeros
map_at(1, ~ "1002") %>%
map_at(c(5, 8, 9, 11), ~ "1009") %>%
map_at(16, ~ "1016") %>%
map_at(26, ~ "1018") %>%
map_at(27, ~ "1019") %>%
map_at(29:30, ~ "1020") %>%
# Corregir manualmente ciertos numeros
map_at(19:20, ~ "1016") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Michoacan
michoacan <- ch %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "MICH", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(24:25, pluck, 1) %>%
# Completar manualmente ciertos numeros
map_at(1, ~ "1187") %>%
map_at(2, ~ "1227") %>%
map_at(4, ~ "1259") %>%
map_at(5, ~ "1277") %>%
map_at(6, ~ "1281") %>%
map_at(7, ~ "1288") %>%
map_at(8, ~ "1303") %>%
map_at(9, ~ "1328") %>%
map_at(12, ~ "1380") %>%
map_at(14, ~ "1399") %>%
map_at(17, ~ "1452") %>%
map_at(18, ~ "1468") %>%
map_at(19, ~ "1480") %>%
map_at(21, ~ "1489") %>%
map_at(22, ~ "1517") %>%
map_at(27, ~ "1588") %>%
# Corregir manualmente ciertos numeros
map_at(10, ~ "1345") %>%
map_at(16, ~ "1441") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
## Sonora, Sinaloa y San Luis Potosi ##
sn <- entities_info %>%
# Elegir los valores que comiencen con "S"
map_depth(2, str_subset, pattern = "^[S|s]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Sonora
sonora <- sn %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "SONO", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(2, pluck, 1) %>%
map_at(7, pluck, 2) %>%
# Completar manualmente ciertos numeros
map_at(5, ~ "1277") %>%
map_at(9, ~ "2771") %>%
map_at(11, ~ "2794") %>%
map_at(13, ~ "2804") %>%
map_at(14, ~ "2806") %>%
map_at(17, ~ "2814") %>%
map_at(23, ~ "2868") %>%
map_at(25, ~ "2879") %>%
map_at(26, ~ "2883") %>%
map_at(27, ~ "2884") %>%
map_at(28, ~ "2886") %>%
map_at(29, ~ "2897") %>%
map_at(30, ~ "2899") %>%
# Corregir manualmente ciertos numeros
map_at(5, ~ "2693") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# Sinaloa
sinaloa <- sn %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "SINA", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(c(3:4, 11, 13), pluck, 1) %>%
# Completar manualmente ciertos numeros
map_at(5, ~ "2868") %>%
map_at(7, ~ "2879") %>%
map_at(9, ~ "2914") %>%
map_at(10, ~ "2949") %>%
map_at(15, ~ "2993") %>%
map_at(17, ~ "3004") %>%
map_at(25, ~ "3119") %>%
map_at(26, ~ "3128") %>%
map_at(27, ~ "3142") %>%
map_at(28, ~ "3150") %>%
# Corregir manualmente ciertos numeros
map_at(12, ~ "2969") %>%
map_at(20, ~ "3052") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
# San Luis Potosi
san_luis_potosi <- sn %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "SANL", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Completar manualmente ciertos numeros
map_at(1, ~ "1190") %>%
map_at(2, ~ "1211") %>%
map_at(4, ~ "1243") %>%
map_at(6, ~ "1297") %>%
map_at(7, ~ "1298") %>%
map_at(8, ~ "1319") %>%
map_at(9, ~ "1355") %>%
map_at(12, ~ "1421") %>%
map_at(13, ~ "1427") %>%
map_at(14, ~ "1431") %>%
map_at(16, ~ "1457") %>%
map_at(17, ~ "1463") %>%
map_at(21, ~ "1542") %>%
map_at(24, ~ "1624") %>%
map_at(29, ~ "1681") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
## Ciudad de Mexico y Estado de Mexico ##
mex <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "MEXICO", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Ciudad de Mexico
ciudad_de_mexico <- mex %>%
# Elegir los valores que comiencen con "C"
map_depth(2, str_subset, pattern = "^C") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 5 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{5}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Completar manualmente ciertos numeros
map_at(4, ~ "10780") %>%
map_at(6, ~ "10869") %>%
map_at(7, ~ "10900") %>%
map_at(8, ~ "10986") %>%
map_at(9, ~ "11043") %>%
map_at(10, ~ "11103") %>%
map_at(11, ~ "11146") %>%
map_at(12, ~ "11199") %>%
map_at(13, ~ "11224") %>%
map_at(15, ~ "11318") %>%
map_at(16, ~ "11351") %>%
map_at(17, ~ "11403") %>%
map_at(18, ~ "11491") %>%
map_at(19, ~ "11545") %>%
map_at(20, ~ "11571") %>%
map_at(24, ~ "11814") %>%
map_at(25, ~ "11894") %>%
map_at(26, ~ "11926") %>%
map_at(27, ~ "11962") %>%
map_at(28, ~ "11996") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
## Baja California y Baja California Sur ##
bc <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "CALIFORNIA", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Baja California
baja_california <- bc %>%
# Elegir las
map_depth(2, str_subset, pattern = "[S|s][U|u][R|r]", negate = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Extraer los numeros de 4 digitos de las cadenas
map_depth(1, str_extract_all, pattern = "[:digit:]{4}") %>%
# Poner los resultados en un solo arreglo por dia
map_depth(1, flatten_chr) %>%
# Eliminar los resultados repetido
map(unique) %>%
# Elegir manualmente ciertos numeros
map_at(12, pluck, 1) %>%
# Completar manualmente ciertos numeros
map_at(2, ~ "3174") %>%
map_at(5, ~ "3223") %>%
map_at(8, ~ "3249") %>%
map_at(17, ~ "3334") %>%
map_at(20, ~ "3374") %>%
map_at(22, ~ "3411") %>%
map_at(23, ~ "3434") %>%
map_at(24, ~ "3450") %>%
map_at(26, ~ "3478") %>%
# Convertir la lista en un arreglo numerico
flatten_chr() %>%
str_remove_all(pattern = ",") %>%
as.integer()
### Estados no terminados ###
## Quintana Roo y Queretaro ##
qr <- entities_info %>%
# Elegir los valores que comiencen con "QU"
map_depth(2, str_subset, pattern = "^[Q|q][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Tabasco y Tamaulipas ##
ta <- entities_info %>%
# Elegir los valores que comiencen con "TA"
map_depth(2, str_subset, pattern = "^[T|t][A|a]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Guanajuato y Guerrero ##
gu <- entities_info %>%
# Elegir los valores que comiencen con "GU"
map_depth(2, str_subset, pattern = "^[G|g][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Colima y Coahuila ##
co <- entities_info %>%
# Elegir los valores que comiencen con "CO"
map_depth(2, str_subset, pattern = "^[C|c][O|o]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
## Tlaxcala y Oaxaca ##
xla <- entities_info %>%
# Elegir los valores que contengan "XA o XC"
map_depth(2, str_subset, pattern = "[X|x]([A|a]|[C|c])") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Veracruz
ver <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "VERACRUZ", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Puebla
pue <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "PUEBLA", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Jalisco
ja <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "JALISCO", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Nuevo Leon
nl <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "LEON", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Hidalgo
hi <- entities_info %>%
# Elegir los valores que comiencen con "H"
map_depth(2, str_subset, pattern = "^[H|h]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Yucatan
yu <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "YUC", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Morelos
mo <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "MORELOS", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Nayarit
na <- entities_info %>%
# Aproximar la busqueda de texto usando logica difusa (Usando la distancia de edicion de Levenshtein)
map_depth(2, agrep, pattern = "NAYAR", value = TRUE) %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Zacatecas
za <- entities_info %>%
# Elegir los valores que contengan "ZA"
map_depth(2, str_subset, pattern = "[Z|z][A|a]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Durango
du <- entities_info %>%
# Elegir los valores que contengan "DU"
map_depth(2, str_subset, pattern = "[D|d][U|u]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
# Aguascalientes
ag <- entities_info %>%
# Elegir los valores que comiencen con "AG"
map_depth(2, str_subset, pattern = "^[A|a][G|g]") %>%
# Poner los resultados en un solo arreglo por dia
transpose() %>%
map_depth(1, flatten_chr) %>%
# Eliminar los valores repetidos
map(unique)
### Presentacion de los resultados ###
# Almacenar los resultados en una tabla
positive_deaths_info <- as_tibble(
list(
"Días" = seq(from = ymd('2020-09-01'), to = ymd('2020-09-30'), by = 'days'),
"Baja California" = baja_california,
"Chiapas" = chiapas,
"Chihuahua" = chihuahua,
"Ciudad de México" = ciudad_de_mexico,
"Michoacán" = michoacan,
"San Luis Potosí" = san_luis_potosi,
"Sinaloa" = sinaloa,
"Sonora" = sonora
)
)
# Crear grafico para visualizar la información mensual
positive_deaths_info %>%
pivot_longer(-1, names_to = "Entidades federativas", values_to = "cases") %>%
ggplot(aes(x = `Días`, y = cases, color = `Entidades federativas`)) +
geom_line() +
labs(y = "Defunciones positivas", title = "Defunciones positivas a COVID-19 por entidad federativa\n(Septiembre 2020)")
# Almacenar los resultados en un archivo RData
save(worldwide_info, positive_deaths_info, file = "app/base_de_datos_2.RData")
|
## Cache the (potentially costly) inverse of a matrix:
## makeCacheMatrix() wraps a matrix with getter/setter closures, and
## cacheSolve() computes the inverse once and reuses the cached value.
## Write a short comment describing this function
## My comments:
## The function below creates a special matrix in a few steps
## 1. set the value of the matrix via `set`
## 2. get the value of the matrix via `get`
## 3. set the value of the inverse via `setinv`
## 4. get the value of the inverse via `getinv`
makeCacheMatrix <- function(x = matrix()) {
  # Pair a matrix `x` with a lazily filled cache of its inverse.
  # Returns a list of four closures sharing this function's environment:
  #   set(y)         replace the stored matrix and invalidate the cache
  #   get()          return the stored matrix
  #   setinv(solve)  store a computed inverse in the cache
  #   getinv()       return the cached inverse, or NULL if not yet set
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix => old inverse is stale
    },
    get = function() x,
    setinv = function(solve) cached_inverse <<- solve,
    getinv = function() cached_inverse
  )
}
## Write a short comment describing this function
## My comments:
## This function calculates the inverse of the special matrix created with the
## function above.
## 1. It checks if the inverse has already been calculated
## 2. If yes, it gets the inverse from the cache skipping the calculation
## 3. If no, it calculates the inverse and stores it in the cache via setinv
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
  # The inverse is computed with solve() only on the first call; later calls
  # announce "getting cached data" and return the cached result.
  # Extra arguments in `...` are forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Quick check for lazy checkers ;)
# set.seed(1)
# mat <- matrix(sample(1:10, 100, replace = TRUE), 10, 10)
# inv <- solve(mat)
# x <- makeCacheMatrix(mat)
# cacheSolve(x)
## Thank you! Bye! | /cachematrix.R | no_license | nikolaypugachyov/ProgrammingAssignment2 | R | false | false | 1,607 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## My comments:
## The function below creates a special matrix in a few steps
## 1. set the value of the matrix via `set`
## 2. get the value of the matrix via `get`
## 3. set the value of the inverse via `setinv`
## 4. get the value of the inverse via `getinv`
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
## My comments:
## This function calculates the inverse of the special matrix created with the
## function above.
## 1. It checks if the inverse has already been claculated
## 2. If yes, it gets the inverse from the cache skipping the calculation
## 3. If no, it calculates the inverse and stores it in the cache via setinv
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if (!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
## Quick check for lazy checkers ;)
# set.seed(1)
# mat <- matrix(sample(1:10, 100, replace = TRUE), 10, 10)
# inv <- solve(mat)
# x <- makeCacheMatrix(mat)
# cacheSolve(x)
## Thank you! Bye! |
library(tidyverse)
library(tuneR)
library(ggplot2)
library(caret)
library(tsfeatures)
library(e1071)
library(randomForest)
library(tree)
library(plotrix)
library(reprtree)
library(cvTools)
library(MLmetrics)
library(reshape2)
library(gridExtra)
########## Read Louis Short Files ##########
files <- list.files(path="Spiker_box_Louis/Short/")
short_wave <- lapply(files, function(x) readWave(paste("Spiker_box_Louis/Short/", x, sep='')))
names(short_wave) <- lapply(files, function(x) substr(x,1,6))
########## Read Zoe Short Files ##########
all_files_short <- list.files("zoe_spiker/Length3/")
wave_file_short <- list()
for (i in all_files_short){
wave_file_short <- c(wave_file_short, list(readWave(file.path("zoe_spiker/Length3/",i))))}
wave_label_short <- lapply(strsplit(all_files_short, "_"), "[[", 1)
wave_label_short <- lapply(wave_label_short, function(x) strsplit(x, "")[[1]])
########## Create Data Frame Function ##########
# Convert a Wave recording into a long data frame for plotting/segmentation.
# waveSeq: a tuneR Wave (S4) object; uses its @left channel samples and
#   @samp.rate to reconstruct the time axis in seconds.
#   NOTE(review): relies on length(waveSeq) giving the number of samples --
#   confirm this matches length(waveSeq@left) for tuneR Wave objects.
# Returns a data.frame with columns: time (s), Y (amplitude), event_type
#   (initialised to "none"), and event_time / event_pos (NA placeholders
#   filled in later by update_df()).
create_df <- function(waveSeq) {
  timeSeq <- seq_len(length(waveSeq))/waveSeq@samp.rate
  df <- data.frame(time = timeSeq,
                   Y = waveSeq@left,
                   event_type = "none",
                   event_time = NA,
                   event_pos = NA)
  # force plain character so event labels can be overwritten freely
  # (guards against factor coercion under R < 4.0 defaults)
  df$event_type <- as.character(df$event_type)
  return(df = df)
}
# Build the plotting data frame for one example recording (L-R-L, length 3)
df <- create_df(short_wave$LRL_L3)
########## identify_event_time_zeroCrossing ##########
# Detect eye-movement events via a sliding-window zero-crossing count.
#
# A window counts as an event when its signal crosses zero fewer than
# `thresholdEvents` times (movement deflections are slow, so they produce
# few sign changes compared with background noise).
#
# Y               numeric signal samples
# time            time stamps in seconds, same length as Y
# windowSize      window length in seconds
# thresholdEvents windows with fewer zero crossings than this are events
# downSampleRate  evaluate every `downSampleRate`-th sample as window start
#
# Returns a list:
#   num_event           number of detected events
#   predictedEventTimes logical vector (length(Y)): sample inside an event?
#   predictedInterval   two-column matrix of event (start, end) times
identify_event_time_zeroCrossing <-
  function(Y, time,
           windowSize = 0.5,
           thresholdEvents = 20,
           downSampleRate = 50) {
    # BUG FIX: the original located the last full window with a floating-point
    # equality test (time == time[length(time)] - windowSize), which usually
    # matches nothing and made seq_len() fail on a zero-length argument;
    # use <= with max(which(...)) instead.
    indexLastWindow <- max(which(time <= max(time) - windowSize)) + 1
    ind <- seq(1, indexLastWindow, by = downSampleRate)
    timeMiddle <- time[ind] + windowSize / 2
    testStat <- rep(NA_real_, length(ind))
    for (i in seq_along(ind)) {
      Y_subset <- Y[time >= time[ind[i]] & time < time[ind[i]] + windowSize]
      # zero crossings: count adjacent sample pairs with opposite signs
      testStat[i] <- sum(Y_subset[-length(Y_subset)] * Y_subset[-1] <= 0)
    }
    predictedEvent <- which(testStat < thresholdEvents)
    if (length(predictedEvent) == 0) {
      # robustness: no window qualified as an event
      return(list(num_event = 0,
                  predictedEventTimes = rep(FALSE, length(Y)),
                  predictedInterval = matrix(numeric(0), ncol = 2)))
    }
    eventTimes <- timeMiddle[predictedEvent]
    # event windows closer than one window length belong to the same event
    deltaSameEvent <- windowSize
    gaps <- which(diff(eventTimes) > deltaSameEvent)
    # Build (start, end) pairs; boundaries fall at the detected gaps.
    # BUG FIX: the original used 1:length(gaps), which for zero gaps iterated
    # over c(1, 0) and appended NA bounds; seq_along() is safe when empty.
    event_time_interval <- min(eventTimes)
    for (i in seq_along(gaps)) {
      event_time_interval <- append(event_time_interval,
                                    c(eventTimes[gaps[i]], eventTimes[gaps[i] + 1]))
    }
    event_time_interval <- append(event_time_interval, max(eventTimes))
    event_time_interval <- matrix(event_time_interval, ncol = 2, byrow = TRUE)
    # flag every sample that falls inside any event interval
    predictedEventTimes <- rep(FALSE, length(Y))
    for (i in seq_len(nrow(event_time_interval))) {
      predictedEventTimes[event_time_interval[i, 1] <= time &
                            event_time_interval[i, 2] >= time] <- TRUE
    }
    list(num_event = length(gaps) + 1,
         predictedEventTimes = predictedEventTimes,
         predictedInterval = event_time_interval)
  }
# Run zero-crossing event detection on the example recording
res = identify_event_time_zeroCrossing(df$Y, df$time)
########## identify_event_sd ##########
# Detect eye-movement events via a sliding-window standard deviation.
# Windows whose SD exceeds `thresholdEvents` are treated as event windows
# (movements produce large-amplitude deflections, hence large SD).
#
# Y / xtime       signal samples and their time stamps (seconds)
# windowSize      window length in seconds
# thresholdEvents SD threshold above which a window counts as an event
# downSampleRate  evaluate every `downSampleRate`-th sample as window start
#
# Returns a list with the number of events and two-column matrices of
# (start, end) times for the noise and event segments.
identify_event_sd <- function(Y, xtime,
                              windowSize = 1,
                              thresholdEvents = 650,
                              downSampleRate = 25) {
  # index of the first sample of the last full window
  x = max(xtime) - windowSize
  indexLastWindow = max(which(xtime <= x)) + 1
  ind = seq(1, indexLastWindow, by = downSampleRate)
  timeMiddle <- xtime[ind] + windowSize/2
  testStat = rep(NA, length(ind))
  for (i in 1:length(ind)) {
    # all samples covered by the window starting at xtime[ind[i]]
    Y_subset <- Y[xtime >= xtime[ind[i]] & xtime < xtime[ind[i]] + windowSize]
    testStat[i] <- sd(Y_subset)
  }
  predictedEvent <- which(testStat > thresholdEvents)
  eventTimes <- timeMiddle[predictedEvent] # map back to the time of this
  # split the event windows into separate events wherever the spacing between
  # consecutive windows exceeds the average spacing.
  # NOTE(review): mean(diff(eventTimes)) is a data-dependent heuristic that
  # assumes at least two event windows -- confirm for short recordings.
  gaps <- which(diff(eventTimes) > mean(diff(eventTimes)))
  # noise intervals: before the first event, between events, after the last
  noiseInterval <- rbind(
    c(range(xtime)[1], min(eventTimes)),
    cbind(eventTimes[gaps], eventTimes[gaps+1]),
    c(max(eventTimes), range(xtime)[2])
  )
  # event intervals are the complement of the noise intervals
  eventsInterval <- cbind(noiseInterval[-nrow(noiseInterval),2],
                          noiseInterval[-1,1])
  return(list(num_event = length(gaps) + 1,
              predictedNoiseInterval = noiseInterval,
              predictedEventInterval = eventsInterval))
}
# wave_file = wave_file_short[[1]]
# Y = wave_file@left
# xtime = seq_len(length(wave_file))/wave_file@samp.rate
# cut_result = identify_event_sd(Y, xtime)
########## Extract Signal for One ##########
# Return the elements of `seq` whose time stamps in `xtime` fall strictly
# inside the (start, end) interval given by `limits`.
extractSignal = function(limits, seq, xtime)
{
  inside <- xtime > limits[1] & xtime < limits[2]
  seq[inside]
}
# wave_seq_short = apply(cut_result$predictedEventInterval, 1, extractSignal, Y, xtime)
########## Extract Signal for All ##########
# Segment every recording into its individual eye-movement events.
wave_seq_short = list()
for (i in seq_along(wave_file_short)) {
  print(i)  # progress indicator
  wave_file = wave_file_short[[i]]
  Y = wave_file@left
  xtime = seq_len(length(wave_file))/wave_file@samp.rate
  cut_result = identify_event_time_zeroCrossing(Y, xtime)
  # BUG FIX: identify_event_time_zeroCrossing() returns its interval matrix
  # under the name "predictedInterval"; the original accessed
  # "predictedEventInterval" (the field name used by identify_event_sd),
  # which yielded NULL and made apply() fail.
  wave_seq_short[[i]] = apply(cut_result$predictedInterval, 1, extractSignal, Y, xtime)
}
# plot(wave_seq_short[[12]][[1]], type="l")
# Recordings 11 and 12 were over-segmented; keep only their first 3 events.
wave_seq_short[[12]] = wave_seq_short[[12]][1:3]
wave_seq_short[[11]] = wave_seq_short[[11]][1:3]
########## Left-Right Classifier ##########
# Classify a single eye-movement event as "Left" or "Right".
# A leftward movement shows its positive peak before its negative trough;
# a rightward movement shows the opposite order.
LRclassify = function(waveseq)
{
  if (which.max(waveseq) < which.min(waveseq)) "Left" else "Right"
}
########## Update Data Frame Function ##########
# Annotate the data frame produced by create_df() with event information
# from a detection result (the list returned by
# identify_event_time_zeroCrossing()).
#   event_type: TRUE for samples inside a detected event
#   event_time: 1-based sample counter within each event interval
#   event_pos : index of the event interval the sample belongs to
update_df <- function(df,result) {
  df$event_type = result$predictedEventTimes
  for (i in 1:result$num_event) {  # NOTE(review): assumes num_event >= 1
    # samples whose time stamp falls inside the i-th event interval
    t_idx = (df$time >= result$predictedInterval[i, 1]) & (df$time <= result$predictedInterval[i, 2])
    df$event_time[t_idx] <- seq_len(sum(t_idx))
    df$event_pos[t_idx] <- i
  }
  return(df = df)
}
# Attach detected-event annotations to the example recording and visualise
# the segmentation (event samples coloured differently).
df = update_df(df,res)
ggplot(df,aes(x=time,y=Y,col=event_type,group=1))+geom_line()
########## Feature Extraction ##########
# Flatten the per-recording event lists into one list of event signals and
# a matching vector of single-letter labels ("L"/"R").
Y_list = unlist(wave_seq_short, recursive=FALSE)
Y_lab = unlist(wave_label_short)
# Per-event time-series features: autocorrelation, entropy, lumpiness,
# flat spots, crossing points, max KL shift, mean/variance, and
# level-/variance-shift statistics.
Y_features <- cbind(
  tsfeatures(Y_list,
             c("acf_features","entropy","lumpiness",
               "flat_spots","crossing_points")),
  tsfeatures(Y_list, "max_kl_shift", width=48),
  tsfeatures(Y_list,
             c("mean","var"), scale=FALSE, na.rm=TRUE),
  tsfeatures(Y_list,
             c("max_level_shift","max_var_shift"), trim=TRUE))
# Drop column 7 -- NOTE(review): presumably a constant or unusable feature;
# confirm which feature this removes before changing the feature set.
Y_features = Y_features[,-7]
# Persist the feature matrix and labels for reuse.
saveRDS(Y_features,file='features.rds')
saveRDS(Y_lab,file='lab.rds')
########## Classification Models ##########
# Repeated 5-fold cross-validation (50 repeats) comparing kNN, random forest
# and SVM on the extracted features; accuracy and F1 are averaged per repeat.
cvK = 5
ACC_knn = ACC_rf = ACC_svm = NA
F1_knn = F1_rf = F1_svm = NA
n = length(Y_lab)
for (i in 1:50) {
  # fresh random fold assignment for each repeat
  cvSets = cvTools::cvFolds(n, cvK)
  acc_knn = acc_rf = acc_svm = NA
  f1_knn = f1_rf = f1_svm = NA
  for (j in 1:cvK) {
    test_id = cvSets$subsets[cvSets$which == j]
    X_test = Y_features[test_id, ]
    X_train = Y_features[-test_id, ]
    y_test = Y_lab[test_id]
    y_train = Y_lab[-test_id]
    # k-nearest neighbours (k = 3)
    knn_fit = class::knn(train = X_train, test = X_test, cl = y_train, k = 3)
    acc_knn[j] = MLmetrics::Accuracy(y_pred = knn_fit, y_true = y_test)
    f1_knn[j] = MLmetrics::F1_Score(y_pred = knn_fit, y_true = y_test)
    # random forest
    rf_res = randomForest::randomForest(x = X_train, y = as.factor(y_train))
    rf_fit = predict(rf_res, X_test)
    acc_rf[j] = MLmetrics::Accuracy(y_pred = rf_fit, y_true = y_test)
    f1_rf[j] = MLmetrics::F1_Score(y_pred = rf_fit, y_true = y_test)
    # support vector machine (default radial kernel)
    svm_res = e1071::svm(x = X_train, y = as.factor(y_train))
    svm_fit = predict(svm_res, X_test)
    acc_svm[j] = MLmetrics::Accuracy(y_pred = svm_fit, y_true = y_test)
    f1_svm[j] = MLmetrics::F1_Score(y_pred = svm_fit, y_true = y_test)
  }
  # average the per-fold metrics for this repeat
  ACC_knn[i] = mean(acc_knn)
  ACC_rf[i] = mean(acc_rf)
  ACC_svm[i] = mean(acc_svm)
  F1_knn[i] = mean(f1_knn)
  F1_rf[i] = mean(f1_rf)
  F1_svm[i] = mean(f1_svm)
}
# Reshape results to long format for faceted plotting.
ACC = data.frame(1:50,ACC_knn,ACC_rf,ACC_svm)
names(ACC) = c('id','KNN','RandomForest','SVM')
ACC = melt(ACC, id.vars = 'id', variable.name = 'Models', value.name = 'Accuracy')
F1Score = data.frame(1:50,F1_knn,F1_rf,F1_svm)
names(F1Score) = c('id','KNN','RandomForest','SVM')
F1Score = melt(F1Score, id.vars = 'id', variable.name = 'Models', value.name = 'F1_Score')
metrics = left_join(ACC,F1Score,by=c('id','Models'))
# Histogram + boxplot of accuracy per model.
acc_hist = metrics %>% ggplot(aes(Accuracy)) +
  geom_histogram(binwidth = 0.005) +
  facet_wrap(~Models) +
  ggtitle('Accuracy Distribution')
acc_box = metrics %>% ggplot() +
  geom_boxplot(aes(y=Accuracy)) +
  coord_flip() +
  facet_wrap(~Models)
grid.arrange(acc_hist,acc_box,nrow=2)
# Histogram + boxplot of F1 score per model.
f1_hist = metrics %>% ggplot(aes(F1_Score)) +
  geom_histogram(binwidth = 0.005) +
  facet_wrap(~Models) +
  ggtitle('F1 Score Distribution')
f1_box = metrics %>% ggplot() +
  geom_boxplot(aes(y=F1_Score)) +
  coord_flip() +
  facet_wrap(~Models)
grid.arrange(f1_hist,f1_box,nrow=2)
| /brain_wave.R | no_license | sherry-fan01/Eye-Movement-Detection | R | false | false | 8,917 | r | library(tidyverse)
library(tuneR)
library(ggplot2)
library(caret)
library(tsfeatures)
library(e1071)
library(randomForest)
library(tree)
library(plotrix)
library(reprtree)
library(cvTools)
library(MLmetrics)
library(reshape2)
library(gridExtra)
########## Read Louis Short Files ##########
files <- list.files(path="Spiker_box_Louis/Short/")
short_wave <- lapply(files, function(x) readWave(paste("Spiker_box_Louis/Short/", x, sep='')))
names(short_wave) <- lapply(files, function(x) substr(x,1,6))
########## Read Zoe Short Files ##########
all_files_short <- list.files("zoe_spiker/Length3/")
wave_file_short <- list()
for (i in all_files_short){
wave_file_short <- c(wave_file_short, list(readWave(file.path("zoe_spiker/Length3/",i))))}
wave_label_short <- lapply(strsplit(all_files_short, "_"), "[[", 1)
wave_label_short <- lapply(wave_label_short, function(x) strsplit(x, "")[[1]])
########## Create Data Frame Function ##########
create_df <- function(waveSeq) {
timeSeq <- seq_len(length(waveSeq))/waveSeq@samp.rate
df <- data.frame(time = timeSeq,
Y = waveSeq@left,
event_type = "none",
event_time = NA,
event_pos = NA)
df$event_type <- as.character(df$event_type)
return(df = df)
}
df <- create_df(short_wave$LRL_L3)
########## identify_event_time_zeroCrossing ##########
identify_event_time_zeroCrossing <-
function(Y, time,
windowSize = 0.5,
thresholdEvents = 20,
downSampleRate = 50) {
ind <- seq_len(which(time == time[length(time)] - windowSize) + 1)
# until the first element of the last window
ind <- seq(1, ind[length(ind)], by = downSampleRate)
timeMiddle <- time[ind] + windowSize/2
testStat <- rep(NA, length(ind))
for (i in 1:length(ind)) {
Y_subset <- Y[time >= time[ind[i]] & time < time[ind[i]] + windowSize]
testStat[i] <- sum(Y_subset[1:(length(Y_subset) - 1)] * Y_subset[2:(length(Y_subset))] <= 0)
}
predictedEvent <- which(testStat < thresholdEvents)
eventTimes <- timeMiddle[predictedEvent]
deltaSameEvent <- windowSize
gaps <- which(diff(eventTimes) > deltaSameEvent)
event_time_interval <- c()
event_time_interval <- min(eventTimes)
for (i in 1:length(gaps)) {
event_time_interval <- append(event_time_interval, c(eventTimes[gaps[i]], eventTimes[gaps[i] + 1]))
}
event_time_interval <- append(event_time_interval, max(eventTimes))
event_time_interval <- matrix(event_time_interval, ncol = 2, byrow = TRUE)
predictedEventTimes <- rep(FALSE, length(Y))
for (i in 1:nrow(event_time_interval)) {
predictedEventTimes[event_time_interval[i, 1] <= time &
event_time_interval[i,2] >= time] <- TRUE
}
num_event <- length(gaps) + 1
return(list(num_event = num_event,
predictedEventTimes = predictedEventTimes,
predictedInterval = event_time_interval))
}
res = identify_event_time_zeroCrossing(df$Y, df$time)
########## identify_event_sd ##########
identify_event_sd <- function(Y, xtime,
windowSize = 1,
thresholdEvents = 650,
downSampleRate = 25) {
x = max(xtime) - windowSize
indexLastWindow = max(which(xtime <= x)) + 1
ind = seq(1, indexLastWindow, by = downSampleRate)
timeMiddle <- xtime[ind] + windowSize/2
testStat = rep(NA, length(ind))
for (i in 1:length(ind)) {
Y_subset <- Y[xtime >= xtime[ind[i]] & xtime < xtime[ind[i]] + windowSize]
testStat[i] <- sd(Y_subset)
}
predictedEvent <- which(testStat > thresholdEvents)
eventTimes <- timeMiddle[predictedEvent] # map back to the time of this
gaps <- which(diff(eventTimes) > mean(diff(eventTimes)))
noiseInterval <- rbind(
c(range(xtime)[1], min(eventTimes)),
cbind(eventTimes[gaps], eventTimes[gaps+1]),
c(max(eventTimes), range(xtime)[2])
)
eventsInterval <- cbind(noiseInterval[-nrow(noiseInterval),2],
noiseInterval[-1,1])
return(list(num_event = length(gaps) + 1,
predictedNoiseInterval = noiseInterval,
predictedEventInterval = eventsInterval))
}
# wave_file = wave_file_short[[1]]
# Y = wave_file@left
# xtime = seq_len(length(wave_file))/wave_file@samp.rate
# cut_result = identify_event_sd(Y, xtime)
########## Extract Signal for One ##########
extractSignal = function(limits, seq, xtime)
{
index = (xtime > limits[1]) & (xtime < limits[2])
return(seq[index])
}
# wave_seq_short = apply(cut_result$predictedEventInterval, 1, extractSignal, Y, xtime)
########## Extract Signal for All ##########
wave_seq_short = list()
for(i in 1:length(wave_file_short)){
print(i)
wave_file = wave_file_short[[i]]
Y = wave_file@left
xtime = seq_len(length(wave_file))/wave_file@samp.rate
cut_result = identify_event_time_zeroCrossing(Y, xtime)
wave_seq_short[[i]] = apply(cut_result$predictedEventInterval, 1, extractSignal, Y, xtime)
}
# plot(wave_seq_short[[12]][[1]], type="l")
wave_seq_short[[12]] = wave_seq_short[[12]][1:3]
wave_seq_short[[11]] = wave_seq_short[[11]][1:3]
########## Left-Right Classifier ##########
LRclassify = function(waveseq)
{
maxPos = which.max(waveseq) ## the position of the maximum value
minPos = which.min(waveseq) ## the position of the minimum value
call = ifelse(maxPos < minPos, "Left", "Right")
return(call)
}
########## Update Data Frame Function ##########
update_df <- function(df,result) {
df$event_type = result$predictedEventTimes
for (i in 1:result$num_event) {
t_idx = (df$time >= result$predictedInterval[i, 1]) & (df$time <= result$predictedInterval[i, 2])
df$event_time[t_idx] <- seq_len(sum(t_idx))
df$event_pos[t_idx] <- i
}
return(df = df)
}
df = update_df(df,res)
ggplot(df,aes(x=time,y=Y,col=event_type,group=1))+geom_line()
########## Feature Extraction ##########
Y_list = unlist(wave_seq_short, recursive=FALSE)
Y_lab = unlist(wave_label_short)
Y_features <- cbind(
tsfeatures(Y_list,
c("acf_features","entropy","lumpiness",
"flat_spots","crossing_points")),
tsfeatures(Y_list, "max_kl_shift", width=48),
tsfeatures(Y_list,
c("mean","var"), scale=FALSE, na.rm=TRUE),
tsfeatures(Y_list,
c("max_level_shift","max_var_shift"), trim=TRUE))
Y_features = Y_features[,-7]
saveRDS(Y_features,file='features.rds')
saveRDS(Y_lab,file='lab.rds')
########## Classification Models ##########
cvK = 5
ACC_knn = ACC_rf = ACC_svm = NA
F1_knn = F1_rf = F1_svm = NA
n = length(Y_lab)
for (i in 1:50) {
cvSets = cvTools::cvFolds(n, cvK)
acc_knn = acc_rf = acc_svm = NA
f1_knn = f1_rf = f1_svm = NA
for (j in 1:cvK) {
test_id = cvSets$subsets[cvSets$which == j]
X_test = Y_features[test_id, ]
X_train = Y_features[-test_id, ]
y_test = Y_lab[test_id]
y_train = Y_lab[-test_id]
knn_fit = class::knn(train = X_train, test = X_test, cl = y_train, k = 3)
acc_knn[j] = MLmetrics::Accuracy(y_pred = knn_fit, y_true = y_test)
f1_knn[j] = MLmetrics::F1_Score(y_pred = knn_fit, y_true = y_test)
rf_res = randomForest::randomForest(x = X_train, y = as.factor(y_train))
rf_fit = predict(rf_res, X_test)
acc_rf[j] = MLmetrics::Accuracy(y_pred = rf_fit, y_true = y_test)
f1_rf[j] = MLmetrics::F1_Score(y_pred = rf_fit, y_true = y_test)
svm_res = e1071::svm(x = X_train, y = as.factor(y_train))
svm_fit = predict(svm_res, X_test)
acc_svm[j] = MLmetrics::Accuracy(y_pred = svm_fit, y_true = y_test)
f1_svm[j] = MLmetrics::F1_Score(y_pred = svm_fit, y_true = y_test)
}
ACC_knn[i] = mean(acc_knn)
ACC_rf[i] = mean(acc_rf)
ACC_svm[i] = mean(acc_svm)
F1_knn[i] = mean(f1_knn)
F1_rf[i] = mean(f1_rf)
F1_svm[i] = mean(f1_svm)
}
ACC = data.frame(1:50,ACC_knn,ACC_rf,ACC_svm)
names(ACC) = c('id','KNN','RandomForest','SVM')
ACC = melt(ACC, id.vars = 'id', variable.name = 'Models', value.name = 'Accuracy')
F1Score = data.frame(1:50,F1_knn,F1_rf,F1_svm)
names(F1Score) = c('id','KNN','RandomForest','SVM')
F1Score = melt(F1Score, id.vars = 'id', variable.name = 'Models', value.name = 'F1_Score')
metrics = left_join(ACC,F1Score,by=c('id','Models'))
acc_hist = metrics %>% ggplot(aes(Accuracy)) +
geom_histogram(binwidth = 0.005) +
facet_wrap(~Models) +
ggtitle('Accuracy Distribution')
acc_box = metrics %>% ggplot() +
geom_boxplot(aes(y=Accuracy)) +
coord_flip() +
facet_wrap(~Models)
grid.arrange(acc_hist,acc_box,nrow=2)
f1_hist = metrics %>% ggplot(aes(F1_Score)) +
geom_histogram(binwidth = 0.005) +
facet_wrap(~Models) +
ggtitle('F1 Score Distribution')
f1_box = metrics %>% ggplot() +
geom_boxplot(aes(y=F1_Score)) +
coord_flip() +
facet_wrap(~Models)
grid.arrange(f1_hist,f1_box,nrow=2)
|
# Figure S1: callability-class fractions per atomic interval for each repeat
# class, with pairwise significance tests; one PDF page per *.cov.tmp.bed file.
library(reshape2)
library("ggpubr")
# Renamed from `rep`, which shadowed base::rep().
# NOTE(review): the pattern's dots are regex wildcards, not literals --
# confirm no unintended files match.
bed_files <- list.files(pattern = ".cov.tmp.bed", full.names = TRUE)
pdf("Figure_S1.pdf", height = 15, width = 20, onefile = TRUE)
for (i in bed_files) {
  a <- read.table(file = i, header = TRUE)  # TRUE, not the reassignable T
  # Repeat-class title from the file name: strip the leading "./", keep the
  # part before the first ".", then replace "_" with spaces.
  repclass <- gsub("_", " ",
                   sapply(strsplit(sapply(strsplit(sapply(strsplit(i, "[/]"), `[`, 2),
                                                   "[.]", 1), `[`, 1), "[.]"), `[`, 1))
  df <- melt(a, id.var = c("Scaffold", "Atomic_Interval"))
  my_comparisons <- list(c("CALLABLE", "POORLY_MAPPED"),
                         c("CALLABLE", "NO_COVERAGE"),
                         c("CALLABLE", "LOW_COVERAGE"),
                         c("CALLABLE", "REF_N"))
  p <- ggboxplot(df, x = "variable", y = "value", fill = "variable",
                 palette = "jco", facet.by = "Atomic_Interval",
                 short.panel.labs = FALSE, ylab = "Fraction of sites") +
    labs(fill = "Callability class")
  P <- p + stat_compare_means(comparisons = my_comparisons, label = "p.signif") +
    stat_compare_means(label = "p.signif", label.x.npc = 0.5, label.y.npc = 0.5)
  pl <- ggpar(P, ylim = c(0, 1.5), xlab = FALSE, legend = "right",
              font.y = c(14, "bold"), font.legend = c(14, "bold"),
              font.main = c(18, "bold.italic"))
  Pl <- pl + rremove("x.text")
  print(Pl + ggtitle(repclass))
}
dev.off()
| /code/fig_S1/figure_s1.r | no_license | Ajinkya-IISERB/CoalRep | R | false | false | 1,069 | r | rep = list.files(pattern=".cov.tmp.bed",full.names=TRUE)
library(reshape2)
library("ggpubr")
pdf("Figure_S1.pdf",height=15,width=20,onefile=TRUE)
for (i in rep){
a <- read.table(file=i,header=T)
repclass <- gsub("_"," ",sapply(strsplit(sapply(strsplit(sapply(strsplit(i,"[/]"),`[`,2),"[.]",1),`[`,1),"[.]"),`[`,1))
melt(a,id.var=c("Scaffold","Atomic_Interval"))->df
my_comparisons <- list( c("CALLABLE", "POORLY_MAPPED"), c("CALLABLE", "NO_COVERAGE"), c("CALLABLE", "LOW_COVERAGE"), c("CALLABLE","REF_N") )
p <- ggboxplot(df, x = "variable", y = "value", fill = "variable", palette = "jco", facet.by = "Atomic_Interval", short.panel.labs = FALSE,ylab="Fraction of sites")+labs(fill="Callability class")
P <- p + stat_compare_means(comparisons = my_comparisons,label = "p.signif")+ stat_compare_means(label = "p.signif",label.x.npc = 0.5,label.y.npc=0.5)
pl <- ggpar(P, ylim = c(0,1.5),xlab=FALSE,legend = "right",font.y = c(14, "bold"),font.legend = c(14, "bold"),font.main = c(18,"bold.italic"))
Pl <- pl + rremove("x.text")
print(Pl+ggtitle(repclass))
}
dev.off()
|
#' m-way Plot with Error Bars and Raw Data
#'
#' @description Plots results from factorial experiments. Estimated marginal
#' means and error bars are plotted in the foreground, raw data is plotted in
#' the background. Error bars can be based on different standard errors (e.g.,
#' model-based, within-subjects, between-subjects). Functions described here
#' return a \pkg{ggplot2} plot object, thus allowing further customization of
#' the plot.
#'
#' \code{afex_plot} is the user friendly function that does data preparation
#' and plotting. It also allows to only return the prepared data (\code{return
#' = "data"}).
#'
#' \code{interaction_plot} does the plotting when a \code{trace} factor is
#' present. \code{oneway_plot} does the plotting when a \code{trace} factor is
#' absent.
#'
#' @param object \code{afex_aov}, \code{mixed}, \code{merMod} or other model
#' object supported by \pkg{emmeans} (for further examples see:
#' \code{vignette("afex_plot_supported_models")}).
#' @param x A \code{character} vector or one-sided \code{formula} specifying the
#' factor names of the predictors displayed on the x-axis. \code{mapping}
#' specifies further mappings for these factors if \code{trace} is missing.
#' @param trace An optional \code{character} vector or one-sided \code{formula}
#' specifying the factor names of the predictors connected by the same line.
#' \code{mapping} specifies further mappings for these factors.
#' @param panel An optional \code{character} vector or one-sided \code{formula}
#' specifying the factor names of the predictors shown in different panels.
#' @param mapping A \code{character} vector specifying which aesthetic mappings
#' should be applied to either the \code{trace} factors (if \code{trace} is
#' specified) or the \code{x} factors. Useful options are any combination of
#' \code{"shape"}, \code{"color"}, \code{"linetype"}, or also \code{"fill"}
#' (see examples). The default (i.e., missing) uses \code{c("shape",
#' "linetype")} if \code{trace} is specified and \code{""} otherwise (i.e., no
#' additional aesthetic). If specific mappings should not be applied to
#' specific graphical elements, one can override those via the corresponding
#' further arguments. For example, for \code{data_arg} the default is
#' \code{list(color = "darkgrey")} which prevents that \code{"color"} is
#' mapped onto points in the background.
#' @param error A scalar \code{character} vector specifying on which standard
#' error the error bars should be based. Default is \code{"model"}, which
#' plots model-based standard errors. Further options are: \code{"none"} (or
#' \code{NULL}), \code{"mean"}, \code{"within"} (or \code{"CMO"}), and
#' \code{"between"}. See details.
#' @param id An optional \code{character} vector specifying over which variables
#' the raw data should be aggregated. Only relevant for \code{mixed},
#' \code{merMod}, and \code{default} method. The default (missing) uses all
#' random effects grouping factors (for \code{mixed} and \code{merMod} method)
#' or assumes all data points are independent. This can lead to many data
#' points. \code{error = "within"} or \code{error = "between"} require that
#' \code{id} is of length 1. See examples.
#' @param dv An optional scalar \code{character} vector giving the name of the
#' column containing the dependent variable for the \code{afex_plot.default}
#' method. If missing, the function attempts to take it from the \code{call}
#' slot of \code{object}. This is also used as y-axis label.
#' @param error_ci Logical. Should error bars plot confidence intervals
#' (=\code{TRUE}, the default) or standard errors (=\code{FALSE})?
#' @param error_level Numeric value between 0 and 1 determining the width of the
#' confidence interval. Default is .95 corresponding to a 95\% confidence
#' interval.
#' @param error_arg A \code{list} of further arguments passed to
#' \code{\link[ggplot2]{geom_errorbar}}, which draws the errorsbars. Default
#' is \code{list(width = 0)} which suppresses the vertical bars at the end of
#' the error bar.
#' @param data_plot \code{logical}. Should raw data be plotted in the
#' background? Default is \code{TRUE}.
#' @param data_geom Geom \code{function} used for plotting data in background.
#' The default (missing) uses \code{\link[ggplot2]{geom_point}} if
#' \code{trace} is specified, otherwise
#'   \code{\link[ggbeeswarm]{geom_beeswarm}}. See examples for further options.
#' @param data_alpha numeric \code{alpha} value between 0 and 1 passed to
#' \code{data_geom}. Default is \code{0.5} which correspond to semitransparent
#' data points in the background such that overlapping data points are plotted
#' darker.
#' @param data_arg A \code{list} of further arguments passed to
#' \code{data_geom}. Default is \code{list(color = "darkgrey")}, which plots
#' points in the background in grey.
#' @param point_arg,line_arg A \code{list} of further arguments passed to
#' \code{\link[ggplot2]{geom_point}} or \code{\link[ggplot2]{geom_line}} which
#' draw the points and lines in the foreground. Default is \code{list()}.
#' \code{line_arg} is only used if \code{trace} is specified.
#' @param emmeans_arg A \code{list} of further arguments passed to
#' \code{\link[emmeans]{emmeans}}. Of particular importance for ANOVAs is
#' \code{model}, see \code{\link{afex_aov-methods}}.
#' @param dodge Numerical amount of dodging of factor-levels on x-axis. Default
#' is \code{0.5}.
#' @param return A scalar \code{character} specifying what should be returned.
#' The default \code{"plot"} returns the \pkg{ggplot2} plot. The other option
#' \code{"data"} returns a list with two \code{data.frame}s containing the
#' data used for plotting: \code{means} contains the means and standard errors
#' for the foreground, \code{data} contains the raw data in the background.
#' @param factor_levels A \code{list} of new factor levels that should be used in
#' the plot. The name of each list entry needs to correspond to one of the
#' factors in the plot.
#' @param legend_title A scalar \code{character} vector with a new title for the
#' legend.
#' @param data For the \code{afex_plot.default} method, an optional
#' \code{data.frame} containing the raw data used for fitting the model and
#' which will be used as basis for the data points in the background. If
#' missing, it will be attempted to obtain it from the model via
#' \code{\link[emmeans]{recover_data}}. For the plotting functions, a
#' \code{data.frame} with the data that has to be passed and contains the
#' background data points.
#' @param within_vars,between_vars For the \code{afex_plot.default} method, an
#' optional \code{character} vector specifying which variables should be
#' treated as within-subjects (or repeated-measures) factors and which as
#'   between-subjects (or independent-samples) factors. If one of the two
#' arguments is given, all other factors are assumed to fall into the other
#' category.
#' @param means \code{data.frame}s used for plotting of the plotting
#' functions.
#' @param col_y,col_x,col_trace,col_panel A scalar \code{character} string
#' specifying the name of the corresponding column containing the information
#' used for plotting. Each column needs to exist in both the \code{means} and
#' the \code{data} \code{data.frame}.
#'@param col_lower,col_upper A scalar \code{character} string specifying the
#' name of the columns containing lower and upper bounds for the error bars.
#' These columns need to exist in \code{means}.
#' @param error_plot \code{logical}. Should error bars be plotted? Only used in
#' plotting functions. To suppress plotting of error bars use \code{error =
#' "none"} in \code{afex_plot}.
#' @param ... currently ignored.
#'
#' @details \code{afex_plot} obtains the estimated marginal means via
#' \code{\link[emmeans]{emmeans}} and aggregates the raw data to the same
#' level. It then calculates the desired confidence interval or standard error
#' (see below) and passes the prepared data to one of the two plotting
#' functions: \code{interaction_plot} when \code{trace} is specified and
#' \code{oneway_plot} otherwise.
#'
#' \subsection{Error Bars}{Error bars provide a graphical representation of the
#' variability of the estimated means and should be routinely added to results
#' figures. However, there exist several possibilities which particular
#' measure of variability to use. Because of this, any figure depicting error
#' bars should be accompanied by a note detailing which measure the error bars
#' shows. The present functions allow plotting of different types of
#' confidence intervals (if \code{error_ci = TRUE}, the default) or standard
#' errors (if \code{error_ci = FALSE}).
#'
#' A further complication is that readers routinely misinterpret confidence
#' intervals. The most common error is to assume that non-overlapping error
#' bars indicate a significant difference (e.g., Belia et al., 2005). This is
#' rarely the case (see e.g., Cumming & Finch, 2005; Knol et al., 2011;
#' Schenker & Gentleman, 2001). For example, in a fully between-subjects design
#' in which the error bars depict 95\% confidence intervals and groups are of
#' approximately equal size and have equal variance, even error bars that
#' overlap by as much as 50\% still correspond to \emph{p} < .05. Error bars
#' that are just touching roughly correspond to \emph{p} = .01.
#'
#' In the case of designs involving repeated-measures factors the usual
#' confidence intervals or standard errors (i.e., model-based confidence
#' intervals or intervals based on the standard error of the mean) cannot be
#' used to gauge significant differences as this requires knowledge about the
#' correlation between measures. One popular alternative in the psychological
#' literature are intervals based on within-subjects standard
#' errors/confidence intervals (e.g., Cousineau & O'Brien, 2014). These
#' attempt to control for the correlation across individuals and thereby allow
#' judging differences between repeated-measures condition. As a downside,
#' when using within-subjects intervals no comparisons across between-subjects
#' conditions or with respect to a fixed-value are possible anymore.
#'
#' In the case of a mixed-design, no single type of error bar is possible that
#' allows comparison across all conditions. Likewise, for mixed models
#' involving multiple \emph{crossed} random effects, no single set of error
#'   bars (or even data aggregation) adequately represent the true variability in
#' the data and adequately allows for "inference by eye". Therefore, special
#' care is necessary in such cases. One possiblity is to avoid error bars
#' altogether and plot only the raw data in the background (with \code{error =
#' "none"}). The raw data in the background still provides a visual impression
#' of the variability in the data and the precision of the mean estimate, but
#' does not as easily suggest an incorrect inferences. Another possibility is
#' to use the model-based standard error and note in the figure caption that
#' it does not permit comparisons across repeated-measures factors.
#'
#' The following "rules of eye" (Cumming and Finch, 2005) hold, when permitted
#' by design (i.e., within-subjects bars for within-subjects comparisons;
#' other variants for between-subjects comparisons), and groups are
#' approximately equal in size and variance. Note that for more complex
#'   designs usually analyzed with mixed models, such as designs involving
#' complicated dependencies across data points, these rules of thumbs may be
#' highly misleading.
#' \itemize{
#' \item \emph{p} < .05 when the overlap of the 95\% confidence intervals
#' (CIs) is no more than about half the average margin of error, that is,
#' when proportion overlap is about .50 or less.
#' \item \emph{p} < .01 when the two CIs do not overlap, that is, when
#' proportion overlap is about 0 or there is a positive gap.
#' \item \emph{p} < .05 when the gap between standard error (SE) bars is at
#' least about the size of the average SE, that is, when the proportion gap
#' is about 1 or greater.
#' \item \emph{p} < .01 when the proportion gap between SE bars is about 2
#' or more.
#' }
#' }
#' \subsection{Implemented Standard Errors}{The following lists the
#' implemented approaches to calculate confidence intervals (CIs) and standard
#' errors (SEs). CIs are based on the SEs using the \emph{t}-distribution with
#' degrees of freedom based on the cell or group size. For ANOVA models,
#' \code{afex_plot} attempts to warn in case the chosen approach is misleading
#' given the design (e.g., model-based error bars for purely
#' within-subjects plots). For \code{mixed} models, no such warnings are
#' produced, but users should be aware that all options beside \code{"model"}
#' are not actually appropriate and have only heuristic value. But then again,
#' \code{"model"} based error bars do not permit comparisons for factors
#' varying within one of the random-effects grouping factors (i.e., factors
#' for which random-slopes should be estimated).
#' \describe{
#' \item{\code{"model"}}{Uses model-based CIs and SEs. For ANOVAs, the
#' variant based on the \code{lm} or \code{mlm} model (i.e.,
#' \code{emmeans_arg = list(model = "multivariate")}) seems generally
#'   preferable.}
#' \item{\code{"mean"}}{Calculates the standard error of the mean for
#' each cell ignoring any repeated-measures factors.}
#' \item{\code{"within"} or \code{"CMO"}}{Calculates within-subjects SEs
#'   using the Cousineau-Morey-O'Brien (Cousineau & O'Brien, 2014) method. This
#' method is based on a double normalization of the data. SEs and CIs are
#' then calculated independently for each cell (i.e., if the desired output
#' contains between-subjects factors, SEs are calculated for each cell
#' including the between-subjects factors).}
#' \item{\code{"between"}}{First aggregates the data per participant and
#' then calculates the SEs for each between-subjects condition. Results in
#' one SE and \emph{t}-quantile for all conditions in purely within-subjects
#' designs.}
#' \item{\code{"none"} or \code{NULL}}{Suppresses calculation of SEs and
#' plots no error bars.}
#' }
#' For \code{mixed} models, the within-subjects/repeated-measures factors are
#' relative to the chosen \code{id} effects grouping factor. They are
#' automatically detected based on the random-slopes of the random-effects
#' grouping factor in \code{id}. All other factors are treated as
#' independent-samples or between-subjects factors.
#' }
#'
#' @return Returns a \pkg{ggplot2} plot (i.e., object of class \code{c("gg",
#' "ggplot")}) unless \code{return = "data"}.
#'
#' @references Belia, S., Fidler, F., Williams, J., & Cumming, G. (2005).
#' Researchers Misunderstand Confidence Intervals and Standard Error Bars.
#' \emph{Psychological Methods}, 10(4), 389-396.
#' https://doi.org/10.1037/1082-989X.10.4.389
#'
#' Cousineau, D., & O'Brien, F. (2014). Error bars in within-subject designs:
#' a comment on Baguley (2012). \emph{Behavior Research Methods}, 46(4),
#' 1149-1151. https://doi.org/10.3758/s13428-013-0441-z
#'
#' Cumming, G., & Finch, S. (2005). Inference by Eye: Confidence Intervals and
#' How to Read Pictures of Data. \emph{American Psychologist}, 60(2), 170-180.
#' https://doi.org/10.1037/0003-066X.60.2.170
#'
#' Knol, M. J., Pestman, W. R., & Grobbee, D. E. (2011). The (mis)use of
#' overlap of confidence intervals to assess effect modification.
#' \emph{European Journal of Epidemiology}, 26(4), 253-254.
#' https://doi.org/10.1007/s10654-011-9563-8
#'
#' Schenker, N., & Gentleman, J. F. (2001). On Judging the Significance of
#' Differences by Examining the Overlap Between Confidence Intervals.
#' \emph{The American Statistician}, 55(3), 182-186.
#' https://doi.org/10.1198/000313001317097960
#'
#'
#' @importFrom stats aggregate sd qt
#'
#' @example examples/examples.afex_plot.R
#'
#' @export
afex_plot <- function(object, ...) {
  ## S3 generic: dispatches on the class of the fitted model object
  ## (afex_aov, mixed, merMod, or the default method).
  UseMethod("afex_plot", object)
}
# @method afex_plot afex_aov
#' @rdname afex_plot
#' @export
afex_plot.afex_aov <- function(object,
                               x,
                               trace,
                               panel,
                               mapping,
                               error = "model",
                               error_ci = TRUE,
                               error_level = 0.95,
                               error_arg = list(width = 0),
                               data_plot = TRUE,
                               data_geom,
                               data_alpha = 0.5,
                               data_arg = list(color = "darkgrey"),
                               point_arg = list(),
                               line_arg = list(),
                               emmeans_arg = list(),
                               dodge = 0.5,
                               return = "plot",
                               factor_levels = list(),
                               legend_title,
                               ...) {
  ## afex_aov method: the within-/between-subjects status of every factor is
  ## stored on the object, so inconsistent choices of `error` can be detected
  ## and warned about below (unlike the mixed/merMod methods).
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## `...` is accepted only for S3 compatibility; warn so misspelled argument
  ## names do not go unnoticed.
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## normalize x/trace/panel (character vector or one-sided formula) into
  ## character vectors of factor names; trace/panel may come back empty.
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## estimated marginal means (plot foreground) obtained via emmeans
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  ## prepare raw (i.e., participant by cell) data
  data <- prep_data(object$data$long,
                    x = x,
                    trace = trace,
                    panel = panel,
                    factor_levels = factor_levels,
                    dv_col = attr(object, "dv"),
                    id = attr(object, "id"))
  ### prepare variables for SE/CI calculation
  within_vars <- all_vars[all_vars %in% names(attr(object, "within"))]
  between_vars <- all_vars[all_vars %in% names(attr(object, "between"))]
  ### check if error bars are consistent with panel(s) and warn otherwise
  if (error %in% c("model", "mean", "between") &&
      all(c(x, trace) %in% within_vars)) {
    warning("Panel(s) show within-subjects factors, ",
            "but not within-subjects error bars.\n",
            'For within-subjects error bars use: error = "within"',
            call. = FALSE)
  } else if (error %in% c("within", "CMO") &&
             all(c(x, trace) %in% between_vars)) {
    warning("Panel(s) show between-subjects factors, ",
            "but within-subjects error bars.\n",
            'For between-subjects error bars use e.g.,: ',
            'error = "model" or error = "mean"',
            call. = FALSE)
  } else if (any(between_vars %in% c(x, trace)) &&
             any(within_vars %in% c(x, trace)) &&
             error != "none") {
    warning("Panel(s) show a mixed within-between-design.\n",
            "Error bars do not allow comparisons across all means.\n",
            'Suppress error bars with: error = "none"',
            call. = FALSE)
  }
  ## attach lower/upper bounds (CI or SE, per `error_ci`) to the means
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = attr(object, "id"), ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## hand off to interaction_plot()/oneway_plot() or return the prepared data
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
  ))
}
# @method afex_plot afex_aov
#' @rdname afex_plot
#' @export
afex_plot.mixed <- function(object,
                            x,
                            trace,
                            panel,
                            mapping,
                            id,
                            error = "model",
                            error_ci = TRUE,
                            error_level = 0.95,
                            error_arg = list(width = 0),
                            data_plot = TRUE,
                            data_geom,
                            data_alpha = 0.5,
                            data_arg = list(color = "darkgrey"),
                            point_arg = list(),
                            line_arg = list(),
                            emmeans_arg = list(),
                            dodge = 0.5,
                            return = "plot",
                            factor_levels = list(),
                            legend_title,
                            ...) {
  ## mixed method: raw data are aggregated over the random-effects grouping
  ## factor(s) in `id`; within- vs between-subjects status of the plotted
  ## factors is derived from the random-slope structure of the model.
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## `...` is accepted only for S3 compatibility; warn on anything passed
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## normalize x/trace/panel (character vector or one-sided formula)
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## default: aggregate over all random-effects grouping factors
  if (missing(id)) {
    id <- unique(names(lme4::ranef(object$full_model)))
    message("Aggregating data over: ", paste(id, collapse = ", "))
  }
  ## prepare raw (i.e., participant by cell) data
  data <- prep_data(object$data,
                    x = x,
                    trace = trace,
                    panel = panel,
                    factor_levels = factor_levels,
                    dv_col = deparse(object$full_model@call[["formula"]][[2]]),
                    id = id)
  ## single id column combining all grouping factors (used for aggregation)
  data$afex_id <- interaction(data[id], sep = ".")
  ## within/between error bars are undefined across multiple crossed random
  ## effects, so only model/mean/none are permitted then
  ## (both operands are scalar, hence the short-circuit &&)
  if (!(error %in% c("none", "model", "mean")) &&
      (length(id) > 1)) {
    stop("When aggregating over multiple random effects,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## estimated marginal means (plot foreground) via emmeans
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  attr(emms, "dv") <- deparse(object$full_model@call[["formula"]][[2]])
  if (length(id) == 1) {
    ## factors appearing in random-effects terms involving `id` are
    ## within-subjects relative to `id`; everything else is between.
    all_within <- lapply(lme4::findbars(object$call), all.vars)
    all_within <-
      unique(unlist(
        all_within[vapply(all_within, function(x) id %in% x, NA)]
      ))
    all_within <- all_within[all_within != id]
    within_vars <- all_vars[all_vars %in% all_within]
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ### prepare variables for SE/CI calculation
  ## NOTE(review): with length(id) > 1, within_vars/between_vars are never
  ## assigned; this relies on get_data_based_cis() not evaluating these
  ## (lazily promised) arguments for error in c("none", "model", "mean"),
  ## which the stop() above guarantees is the only reachable case.
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## hand off to interaction_plot()/oneway_plot() or return the prepared data
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
  ))
}
#' @rdname afex_plot
#' @export
afex_plot.merMod <- function(object,
                             x,
                             trace,
                             panel,
                             mapping,
                             id,
                             error = "model",
                             error_ci = TRUE,
                             error_level = 0.95,
                             error_arg = list(width = 0),
                             data_plot = TRUE,
                             data_geom,
                             data_alpha = 0.5,
                             data_arg = list(color = "darkgrey"),
                             point_arg = list(),
                             line_arg = list(),
                             emmeans_arg = list(),
                             dodge = 0.5,
                             return = "plot",
                             factor_levels = list(),
                             legend_title,
                             ...) {
  ## merMod method: like the mixed method, but the model information is
  ## taken directly from the lme4 fit (slots/call) instead of the afex
  ## wrapper, and the raw data are recovered via emmeans::recover_data().
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## `...` is accepted only for S3 compatibility; warn on anything passed
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## normalize x/trace/panel (character vector or one-sided formula)
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## default: aggregate over all random-effects grouping factors
  if (missing(id)) {
    id <- unique(names(lme4::ranef(object)))
    message("Aggregating data over: ", paste(id, collapse = ", "))
  }
  ## prepare raw (i.e., participant by cell) data; fixed.only = FALSE so the
  ## recovered data also contain the random-effects grouping columns
  data <- prep_data(
    data = emmeans::recover_data(
      object = object,
      trms = terms(object, fixed.only = FALSE)
    ),
    x = x,
    trace = trace,
    panel = panel,
    factor_levels = factor_levels,
    dv_col = deparse(object@call[["formula"]][[2]]),
    id = id)
  ## single id column combining all grouping factors (used for aggregation)
  data$afex_id <- interaction(data[id], sep = ".")
  ## within/between error bars are undefined across multiple crossed random
  ## effects, so only model/mean/none are permitted then
  ## (both operands are scalar, hence the short-circuit &&)
  if (!(error %in% c("none", "model", "mean")) &&
      (length(id) > 1)) {
    stop("When aggregating over multiple random effects,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## estimated marginal means (plot foreground) via emmeans
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  attr(emms, "dv") <- deparse(object@call[["formula"]][[2]])
  if (length(id) == 1) {
    ## factors appearing in random-effects terms involving `id` are
    ## within-subjects relative to `id`; everything else is between.
    all_within <- lapply(lme4::findbars(object@call), all.vars)
    all_within <-
      unique(unlist(
        all_within[vapply(all_within, function(x) id %in% x, NA)]
      ))
    all_within <- all_within[all_within != id]
    within_vars <- all_vars[all_vars %in% all_within]
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ### prepare variables for SE/CI calculation
  ## NOTE(review): with length(id) > 1, within_vars/between_vars are never
  ## assigned; this relies on get_data_based_cis() not evaluating these
  ## (lazily promised) arguments for error in c("none", "model", "mean"),
  ## which the stop() above guarantees is the only reachable case.
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## hand off to interaction_plot()/oneway_plot() or return the prepared data
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
  ))
}
#' @rdname afex_plot
#' @export
afex_plot.default <- function(object,
                              x,
                              trace,
                              panel,
                              mapping,
                              id,
                              dv,
                              data,
                              within_vars,
                              between_vars,
                              error = "model",
                              error_ci = TRUE,
                              error_level = 0.95,
                              error_arg = list(width = 0),
                              data_plot = TRUE,
                              data_geom,
                              data_alpha = 0.5,
                              data_arg = list(color = "darkgrey"),
                              point_arg = list(),
                              line_arg = list(),
                              emmeans_arg = list(),
                              dodge = 0.5,
                              return = "plot",
                              factor_levels = list(),
                              legend_title,
                              ...) {
  ## default method for any model class supported by emmeans; dv, data, id,
  ## and the within/between split must either be supplied or be recoverable
  ## from the model object itself.
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## `...` is accepted only for S3 compatibility; warn on anything passed
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## normalize x/trace/panel (character vector or one-sided formula)
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## default dv: LHS of the model formula found in the call
  if (missing(dv)) {
    formula_name <- names(object$call)[2]
    message("dv column detected: ", deparse(object$call[[formula_name]][[2]]))
    dv <- deparse(object$call[[formula_name]][[2]])
  }
  ## prepare raw (i.e., participant by cell) data if missing
  if (missing(data)) {
    data <- emmeans::recover_data(
      object = object,
      trms = terms(object)
    )
  }
  ## without an id column, every row is treated as an independent sample
  if (missing(id)) {
    message("No id column passed. ",
            "Assuming all rows are independent samples.")
    data$id <- factor(seq_len(nrow(data)))
    id <- "id"
  }
  data <- prep_data(
    data = data,
    x = x,
    trace = trace,
    panel = panel,
    factor_levels = factor_levels,
    dv_col = dv,
    id = id)
  ## single id column combining all grouping factors (used for aggregation)
  data$afex_id <- interaction(data[id], sep = ".")
  ## within/between error bars are undefined across multiple id columns
  ## (both operands are scalar, hence the short-circuit &&)
  if (!(error %in% c("none", "model", "mean")) &&
      (length(id) > 1)) {
    stop("When aggregating over multiple ids,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## estimated marginal means (plot foreground) via emmeans
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  attr(emms, "dv") <- dv
  ## if only one of within_vars/between_vars is given, derive the other as
  ## its complement among the plotted factors.
  ## BUG FIX: each branch previously complemented the *missing* argument
  ## (e.g. `all_vars %in% within_vars` inside `missing(within_vars)`), which
  ## errors with "argument ... is missing" as soon as the promise is forced.
  if (missing(within_vars) && !missing(between_vars)) {
    within_vars <- all_vars[!(all_vars %in% between_vars)]
  }
  if (!missing(within_vars) && missing(between_vars)) {
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ### prepare variables for SE/CI calculation
  ## NOTE(review): if both within_vars and between_vars are missing they stay
  ## missing; get_data_based_cis() must not force them for
  ## error in c("none", "model", "mean") — TODO confirm for other values.
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## hand off to interaction_plot()/oneway_plot() or return the prepared data
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
  ))
}
###if(getRversion() >= "2.15.1") utils::globalVariables(c("error", "y", "x"))
#' @rdname afex_plot
#' @export
interaction_plot <- function(means,
                             data,
                             mapping = c("shape", "linetype"),
                             error_plot = TRUE,
                             error_arg = list(width = 0),
                             data_plot = TRUE,
                             data_geom = ggplot2::geom_point,
                             data_alpha = 0.5,
                             data_arg = list(color = "darkgrey"),
                             point_arg = list(),
                             line_arg = list(),
                             dodge = 0.5,
                             legend_title,
                             col_x = "x",
                             col_y = "y",
                             col_trace = "trace",
                             col_panel = "panel",
                             col_lower = "lower",
                             col_upper = "upper") {
  ## Builds the two-or-more-factor plot: means/error bars in the foreground
  ## (dodged per trace level), raw data in the background.
  ## FIX: default for `mapping` previously read c("shape", "lineytpe");
  ## harmless at runtime (overwritten below when missing) but wrong in the
  ## displayed signature/help.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("package ggplot2 is required.", call. = FALSE)
  }
  if (missing(mapping)) {
    mapping <- c('shape', 'linetype')
  } else if (length(mapping) == 0) {
    stop("mapping cannot be empty. Possible values: 'shape', 'color', 'linetype'.",
         call. = FALSE)
  }
  ## map every requested aesthetic onto the trace factor
  tmp_list <- as.list(rep(col_trace, length(mapping)))
  names(tmp_list) <- mapping
  ## error bars only inherit aesthetics that are meaningful for them
  ## (e.g. color), not shape/linetype/fill
  error_mapping <- mapping[!(mapping %in% c("linetype", "shape", "fill"))]
  tmp_list_error <- as.list(rep(col_trace, length(error_mapping)))
  names(tmp_list_error) <- error_mapping
  plot_out <- ggplot2::ggplot(data = means,
                              mapping = do.call(
                                what = ggplot2::aes_string,
                                args = c(list(
                                  y = col_y,
                                  x = col_x,
                                  group = col_trace),
                                  tmp_list)))
  if (data_plot) {
    if (missing(data_geom)) {
      data_geom <- ggplot2::geom_point
    }
    data_arg$alpha <- data_alpha
    ## dodge the background data like the means, but only if the chosen geom
    ## supports a position argument and the caller did not set one
    ## (scalar condition, hence &&)
    if (!("position" %in% names(data_arg)) &&
        ("position" %in% names(formals(data_geom)))) {
      data_arg$position <- ggplot2::position_dodge(width = dodge)
    }
    plot_out <- plot_out +
      do.call(what = data_geom,
              args = c(
                #mapping = list(ggplot2::aes(group = interaction(x, trace))),
                mapping =
                  list(
                    ggplot2::aes_string(
                      group =
                        paste0("interaction(",
                               paste0(c(col_x, col_trace), collapse = ", "),
                               ")")
                    )),
                data = list(data),
                data_arg
              )
      )
  }
  ## Add one points/lines (and optionally error-bar) layer per trace level.
  ## All non-matching rows are set to NA instead of dropped so every layer
  ## sees the full factor levels and dodging stays aligned across layers.
  for (i in levels(data$trace)) {
    tmp_means <- means
    tmp_means[means$trace != i, c(col_y, col_lower, col_upper)] <- NA
    #tmp_means <- tmp_means[means$trace == i,]
    plot_out <- plot_out +
      do.call(what = ggplot2::geom_point,
              args = c(
                data = list(tmp_means),
                position = list(
                  ggplot2::position_dodge(width = dodge)
                ),
                point_arg,
                na.rm = list(TRUE)
              )) +
      do.call(what = ggplot2::geom_line,
              args = c(
                data = list(tmp_means),
                position = list(
                  ggplot2::position_dodge(width = dodge)
                ),
                line_arg,
                na.rm = list(TRUE)
              ))
    if (error_plot) {
      plot_out <- plot_out +
        do.call(what = ggplot2::geom_errorbar,
                args = c(
                  data = list(tmp_means),
                  mapping = list(do.call(
                    what = ggplot2::aes_string,
                    args = c(list(
                      x = col_x,
                      ymin = col_lower,
                      ymax = col_upper,
                      group = col_trace),
                      tmp_list_error))),
                  position = list(ggplot2::position_dodge(width = dodge)),
                  error_arg,
                  na.rm = list(TRUE),
                  inherit.aes = list(FALSE)
                ))
    }
  }
  ## one facet per panel level (only if there is more than one panel)
  if (length(unique(means$panel)) > 1) {
    plot_out <- plot_out +
      ggplot2::facet_wrap(facets = "panel")
  }
  ## add labels (attributes set by the data-preparation step upstream)
  if (!is.null(attr(means, "dv"))) {
    plot_out <- plot_out +
      ggplot2::ylab(attr(means, "dv"))
  }
  if (!is.null(attr(means, "x"))) {
    plot_out <- plot_out +
      ggplot2::xlab(attr(means, "x"))
  }
  ## a shared legend title must be set for every mapped aesthetic, otherwise
  ## ggplot2 shows separate legends
  if (!missing(legend_title)) {
    legend_title <- paste(legend_title, collapse = "\n")
    tmp_list <- rep(list(ggplot2::guide_legend(title = legend_title)),
                    length(mapping))
    names(tmp_list) <- mapping
    plot_out <- plot_out +
      do.call(what = ggplot2::guides,
              args = tmp_list)
  }
  return(plot_out)
}
#' @rdname afex_plot
#' @export
oneway_plot <- function(means,
                        data,
                        mapping = "",
                        error_plot = TRUE,
                        error_arg = list(width = 0),
                        data_plot = TRUE,
                        data_geom = ggbeeswarm::geom_beeswarm,
                        data_alpha = 0.5,
                        data_arg = list(color = "darkgrey"),
                        point_arg = list(),
                        legend_title,
                        col_x = "x",
                        col_y = "y",
                        col_panel = "panel",
                        col_lower = "lower",
                        col_upper = "upper") {
  ## Builds the plot when no trace factor is present: one mean (plus optional
  ## error bar) per level of the x factor, raw data as beeswarm in the
  ## background. No dodging is needed here, unlike interaction_plot().
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("package ggplot2 is required.", call. = FALSE)
  }
  if (missing(mapping)) {
    mapping <- ""
  }
  ## "" means no extra aesthetics; otherwise map each requested aesthetic
  ## onto the x factor (error bars skip shape/linetype/fill, which are not
  ## meaningful for them)
  if (length(mapping) > 1 || mapping[1] != "") {
    tmp_list <- as.list(rep(col_x, length(mapping)))
    names(tmp_list) <- mapping
    error_mapping <- mapping[!(mapping %in% c("linetype", "shape", "fill"))]
    tmp_list_error <- as.list(rep(col_x, length(error_mapping)))
    names(tmp_list_error) <- error_mapping
  } else {
    tmp_list <- list()
    tmp_list_error <- list()
  }
  plot_out <- ggplot2::ggplot(data = means,
                              mapping = do.call(
                                what = ggplot2::aes_string,
                                args = c(list(
                                  y = col_y,
                                  x = col_x,
                                  group = col_x),
                                  tmp_list)))
  ## raw data in the background (default: beeswarm, needs ggbeeswarm)
  if (data_plot) {
    if (missing(data_geom)) {
      if (!requireNamespace("ggbeeswarm", quietly = TRUE)) {
        stop("package ggbeeswarm is required.", call. = FALSE)
      }
      data_geom <- ggbeeswarm::geom_beeswarm
    }
    data_arg$alpha <- data_alpha
    plot_out <- plot_out +
      do.call(what = data_geom,
              args = c(
                data = list(data),
                data_arg
              )
      )
  }
  ## means in the foreground
  plot_out <- plot_out +
    do.call(what = ggplot2::geom_point,
            args = point_arg)
  if (error_plot) {
    plot_out <- plot_out +
      do.call(what = ggplot2::geom_errorbar,
              args = c(
                mapping = list(do.call(
                  what = ggplot2::aes_string,
                  args = c(list(
                    x = col_x,
                    ymin = col_lower,
                    ymax = col_upper),
                    tmp_list_error))),
                error_arg,
                inherit.aes = list(FALSE)
              ))
  }
  ## one facet per panel level (only if there is more than one panel)
  if (length(unique(means$panel)) > 1) {
    plot_out <- plot_out +
      ggplot2::facet_wrap(facets = "panel")
  }
  ## add labels (attributes set by the data-preparation step upstream)
  if (!is.null(attr(means, "dv"))) {
    plot_out <- plot_out +
      ggplot2::ylab(attr(means, "dv"))
  }
  if (!is.null(attr(means, "x"))) {
    plot_out <- plot_out +
      ggplot2::xlab(attr(means, "x"))
  }
  ## a shared legend title must be set for every mapped aesthetic, otherwise
  ## ggplot2 shows separate legends
  if (!missing(legend_title)) {
    legend_title <- paste(legend_title, collapse = "\n")
    tmp_list <- rep(list(ggplot2::guide_legend(title = legend_title)),
                    length(mapping))
    names(tmp_list) <- mapping
    plot_out <- plot_out +
      do.call(what = ggplot2::guides,
              args = tmp_list)
  }
  return(plot_out)
}
| /R/afex_plot.R | no_license | rvlenth/afex | R | false | false | 43,440 | r | #' m-way Plot with Error Bars and Raw Data
#'
#' @description Plots results from factorial experiments. Estimated marginal
#' means and error bars are plotted in the foreground, raw data is plotted in
#' the background. Error bars can be based on different standard errors (e.g.,
#' model-based, within-subjects, between-subjects). Functions described here
#' return a \pkg{ggplot2} plot object, thus allowing further customization of
#' the plot.
#'
#' \code{afex_plot} is the user friendly function that does data preparation
#' and plotting. It also allows to only return the prepared data (\code{return
#' = "data"}).
#'
#' \code{interaction_plot} does the plotting when a \code{trace} factor is
#' present. \code{oneway_plot} does the plotting when a \code{trace} factor is
#' absent.
#'
#' @param object \code{afex_aov}, \code{mixed}, \code{merMod} or other model
#' object supported by \pkg{emmeans} (for further examples see:
#' \code{vignette("afex_plot_supported_models")}).
#' @param x A \code{character} vector or one-sided \code{formula} specifying the
#' factor names of the predictors displayed on the x-axis. \code{mapping}
#' specifies further mappings for these factors if \code{trace} is missing.
#' @param trace An optional \code{character} vector or one-sided \code{formula}
#' specifying the factor names of the predictors connected by the same line.
#' \code{mapping} specifies further mappings for these factors.
#' @param panel An optional \code{character} vector or one-sided \code{formula}
#' specifying the factor names of the predictors shown in different panels.
#' @param mapping A \code{character} vector specifying which aesthetic mappings
#' should be applied to either the \code{trace} factors (if \code{trace} is
#' specified) or the \code{x} factors. Useful options are any combination of
#' \code{"shape"}, \code{"color"}, \code{"linetype"}, or also \code{"fill"}
#' (see examples). The default (i.e., missing) uses \code{c("shape",
#' "linetype")} if \code{trace} is specified and \code{""} otherwise (i.e., no
#' additional aesthetic). If specific mappings should not be applied to
#' specific graphical elements, one can override those via the corresponding
#' further arguments. For example, for \code{data_arg} the default is
#' \code{list(color = "darkgrey")} which prevents that \code{"color"} is
#' mapped onto points in the background.
#' @param error A scalar \code{character} vector specifying on which standard
#' error the error bars should be based. Default is \code{"model"}, which
#' plots model-based standard errors. Further options are: \code{"none"} (or
#' \code{NULL}), \code{"mean"}, \code{"within"} (or \code{"CMO"}), and
#' \code{"between"}. See details.
#' @param id An optional \code{character} vector specifying over which variables
#' the raw data should be aggregated. Only relevant for \code{mixed},
#' \code{merMod}, and \code{default} method. The default (missing) uses all
#' random effects grouping factors (for \code{mixed} and \code{merMod} method)
#' or assumes all data points are independent. This can lead to many data
#' points. \code{error = "within"} or \code{error = "between"} require that
#' \code{id} is of length 1. See examples.
#' @param dv An optional scalar \code{character} vector giving the name of the
#' column containing the dependent variable for the \code{afex_plot.default}
#' method. If missing, the function attempts to take it from the \code{call}
#' slot of \code{object}. This is also used as y-axis label.
#' @param error_ci Logical. Should error bars plot confidence intervals
#' (=\code{TRUE}, the default) or standard errors (=\code{FALSE})?
#' @param error_level Numeric value between 0 and 1 determing the width of the
#' confidence interval. Default is .95 corresponding to a 95\% confidence
#' interval.
#' @param error_arg A \code{list} of further arguments passed to
#'   \code{\link[ggplot2]{geom_errorbar}}, which draws the error bars. Default
#' is \code{list(width = 0)} which suppresses the vertical bars at the end of
#' the error bar.
#' @param data_plot \code{logical}. Should raw data be plotted in the
#' background? Default is \code{TRUE}.
#' @param data_geom Geom \code{function} used for plotting data in background.
#' The default (missing) uses \code{\link[ggplot2]{geom_point}} if
#' \code{trace} is specified, otherwise
#'   \code{\link[ggbeeswarm]{geom_beeswarm}}. See examples for further options.
#' @param data_alpha numeric \code{alpha} value between 0 and 1 passed to
#' \code{data_geom}. Default is \code{0.5} which correspond to semitransparent
#' data points in the background such that overlapping data points are plotted
#' darker.
#' @param data_arg A \code{list} of further arguments passed to
#' \code{data_geom}. Default is \code{list(color = "darkgrey")}, which plots
#' points in the background in grey.
#' @param point_arg,line_arg A \code{list} of further arguments passed to
#' \code{\link[ggplot2]{geom_point}} or \code{\link[ggplot2]{geom_line}} which
#' draw the points and lines in the foreground. Default is \code{list()}.
#' \code{line_arg} is only used if \code{trace} is specified.
#' @param emmeans_arg A \code{list} of further arguments passed to
#' \code{\link[emmeans]{emmeans}}. Of particular importance for ANOVAs is
#' \code{model}, see \code{\link{afex_aov-methods}}.
#' @param dodge Numerical amount of dodging of factor-levels on x-axis. Default
#' is \code{0.5}.
#' @param return A scalar \code{character} specifying what should be returned.
#' The default \code{"plot"} returns the \pkg{ggplot2} plot. The other option
#' \code{"data"} returns a list with two \code{data.frame}s containing the
#' data used for plotting: \code{means} contains the means and standard errors
#' for the foreground, \code{data} contains the raw data in the background.
#' @param factor_levels A \code{list} of new factor levels that should be used in
#' the plot. The name of each list entry needs to correspond to one of the
#' factors in the plot.
#' @param legend_title A scalar \code{character} vector with a new title for the
#' legend.
#' @param data For the \code{afex_plot.default} method, an optional
#' \code{data.frame} containing the raw data used for fitting the model and
#' which will be used as basis for the data points in the background. If
#' missing, it will be attempted to obtain it from the model via
#' \code{\link[emmeans]{recover_data}}. For the plotting functions, a
#' \code{data.frame} with the data that has to be passed and contains the
#' background data points.
#' @param within_vars,between_vars For the \code{afex_plot.default} method, an
#' optional \code{character} vector specifying which variables should be
#' treated as within-subjects (or repeated-measures) factors and which as
#'   between-subjects (or independent-samples) factors. If one of the two
#' arguments is given, all other factors are assumed to fall into the other
#' category.
#' @param means \code{data.frame}s used for plotting of the plotting
#' functions.
#' @param col_y,col_x,col_trace,col_panel A scalar \code{character} string
#' specifying the name of the corresponding column containing the information
#' used for plotting. Each column needs to exist in both the \code{means} and
#' the \code{data} \code{data.frame}.
#'@param col_lower,col_upper A scalar \code{character} string specifying the
#' name of the columns containing lower and upper bounds for the error bars.
#' These columns need to exist in \code{means}.
#' @param error_plot \code{logical}. Should error bars be plotted? Only used in
#' plotting functions. To suppress plotting of error bars use \code{error =
#' "none"} in \code{afex_plot}.
#' @param ... currently ignored.
#'
#' @details \code{afex_plot} obtains the estimated marginal means via
#' \code{\link[emmeans]{emmeans}} and aggregates the raw data to the same
#' level. It then calculates the desired confidence interval or standard error
#' (see below) and passes the prepared data to one of the two plotting
#' functions: \code{interaction_plot} when \code{trace} is specified and
#' \code{oneway_plot} otherwise.
#'
#' \subsection{Error Bars}{Error bars provide a graphical representation of the
#' variability of the estimated means and should be routinely added to results
#' figures. However, there exist several possibilities which particular
#' measure of variability to use. Because of this, any figure depicting error
#' bars should be accompanied by a note detailing which measure the error bars
#' shows. The present functions allow plotting of different types of
#' confidence intervals (if \code{error_ci = TRUE}, the default) or standard
#' errors (if \code{error_ci = FALSE}).
#'
#' A further complication is that readers routinely misinterpret confidence
#' intervals. The most common error is to assume that non-overlapping error
#' bars indicate a significant difference (e.g., Belia et al., 2005). This is
#' rarely the case (see e.g., Cumming & Finch, 2005; Knol et al., 2011;
#'   Schenker & Gentleman, 2001). For example, in a fully between-subjects design
#' in which the error bars depict 95\% confidence intervals and groups are of
#' approximately equal size and have equal variance, even error bars that
#' overlap by as much as 50\% still correspond to \emph{p} < .05. Error bars
#' that are just touching roughly correspond to \emph{p} = .01.
#'
#' In the case of designs involving repeated-measures factors the usual
#' confidence intervals or standard errors (i.e., model-based confidence
#' intervals or intervals based on the standard error of the mean) cannot be
#' used to gauge significant differences as this requires knowledge about the
#' correlation between measures. One popular alternative in the psychological
#' literature are intervals based on within-subjects standard
#' errors/confidence intervals (e.g., Cousineau & O'Brien, 2014). These
#' attempt to control for the correlation across individuals and thereby allow
#' judging differences between repeated-measures condition. As a downside,
#' when using within-subjects intervals no comparisons across between-subjects
#' conditions or with respect to a fixed-value are possible anymore.
#'
#' In the case of a mixed-design, no single type of error bar is possible that
#' allows comparison across all conditions. Likewise, for mixed models
#' involving multiple \emph{crossed} random effects, no single set of error
#' bars (or even data aggregation) adequately represent the true varibility in
#' the data and adequately allows for "inference by eye". Therefore, special
#' care is necessary in such cases. One possiblity is to avoid error bars
#' altogether and plot only the raw data in the background (with \code{error =
#' "none"}). The raw data in the background still provides a visual impression
#' of the variability in the data and the precision of the mean estimate, but
#' does not as easily suggest an incorrect inferences. Another possibility is
#' to use the model-based standard error and note in the figure caption that
#' it does not permit comparisons across repeated-measures factors.
#'
#' The following "rules of eye" (Cumming and Finch, 2005) hold, when permitted
#' by design (i.e., within-subjects bars for within-subjects comparisons;
#' other variants for between-subjects comparisons), and groups are
#' approximately equal in size and variance. Note that for more complex
#' designs ususally analyzed with mixed models, such as designs involving
#' complicated dependencies across data points, these rules of thumbs may be
#' highly misleading.
#' \itemize{
#' \item \emph{p} < .05 when the overlap of the 95\% confidence intervals
#' (CIs) is no more than about half the average margin of error, that is,
#' when proportion overlap is about .50 or less.
#' \item \emph{p} < .01 when the two CIs do not overlap, that is, when
#' proportion overlap is about 0 or there is a positive gap.
#' \item \emph{p} < .05 when the gap between standard error (SE) bars is at
#' least about the size of the average SE, that is, when the proportion gap
#' is about 1 or greater.
#' \item \emph{p} < .01 when the proportion gap between SE bars is about 2
#' or more.
#' }
#' }
#' \subsection{Implemented Standard Errors}{The following lists the
#' implemented approaches to calculate confidence intervals (CIs) and standard
#' errors (SEs). CIs are based on the SEs using the \emph{t}-distribution with
#' degrees of freedom based on the cell or group size. For ANOVA models,
#' \code{afex_plot} attempts to warn in case the chosen approach is misleading
#' given the design (e.g., model-based error bars for purely
#' within-subjects plots). For \code{mixed} models, no such warnings are
#' produced, but users should be aware that all options beside \code{"model"}
#' are not actually appropriate and have only heuristic value. But then again,
#' \code{"model"} based error bars do not permit comparisons for factors
#' varying within one of the random-effects grouping factors (i.e., factors
#' for which random-slopes should be estimated).
#' \describe{
#' \item{\code{"model"}}{Uses model-based CIs and SEs. For ANOVAs, the
#' variant based on the \code{lm} or \code{mlm} model (i.e.,
#' \code{emmeans_arg = list(model = "multivariate")}) seems generally
#'       preferable.}
#' \item{\code{"mean"}}{Calculates the standard error of the mean for
#' each cell ignoring any repeated-measures factors.}
#' \item{\code{"within"} or \code{"CMO"}}{Calculates within-subjects SEs
#'     using the Cousineau-Morey-O'Brien (Cousineau & O'Brien, 2014) method. This
#' method is based on a double normalization of the data. SEs and CIs are
#' then calculated independently for each cell (i.e., if the desired output
#' contains between-subjects factors, SEs are calculated for each cell
#' including the between-subjects factors).}
#' \item{\code{"between"}}{First aggregates the data per participant and
#' then calculates the SEs for each between-subjects condition. Results in
#' one SE and \emph{t}-quantile for all conditions in purely within-subjects
#' designs.}
#' \item{\code{"none"} or \code{NULL}}{Suppresses calculation of SEs and
#' plots no error bars.}
#' }
#' For \code{mixed} models, the within-subjects/repeated-measures factors are
#' relative to the chosen \code{id} effects grouping factor. They are
#' automatically detected based on the random-slopes of the random-effects
#' grouping factor in \code{id}. All other factors are treated as
#' independent-samples or between-subjects factors.
#' }
#'
#' @return Returns a \pkg{ggplot2} plot (i.e., object of class \code{c("gg",
#' "ggplot")}) unless \code{return = "data"}.
#'
#' @references Belia, S., Fidler, F., Williams, J., & Cumming, G. (2005).
#' Researchers Misunderstand Confidence Intervals and Standard Error Bars.
#' \emph{Psychological Methods}, 10(4), 389-396.
#' https://doi.org/10.1037/1082-989X.10.4.389
#'
#' Cousineau, D., & O'Brien, F. (2014). Error bars in within-subject designs:
#' a comment on Baguley (2012). \emph{Behavior Research Methods}, 46(4),
#' 1149-1151. https://doi.org/10.3758/s13428-013-0441-z
#'
#' Cumming, G., & Finch, S. (2005). Inference by Eye: Confidence Intervals and
#' How to Read Pictures of Data. \emph{American Psychologist}, 60(2), 170-180.
#' https://doi.org/10.1037/0003-066X.60.2.170
#'
#' Knol, M. J., Pestman, W. R., & Grobbee, D. E. (2011). The (mis)use of
#' overlap of confidence intervals to assess effect modification.
#' \emph{European Journal of Epidemiology}, 26(4), 253-254.
#' https://doi.org/10.1007/s10654-011-9563-8
#'
#' Schenker, N., & Gentleman, J. F. (2001). On Judging the Significance of
#' Differences by Examining the Overlap Between Confidence Intervals.
#' \emph{The American Statistician}, 55(3), 182-186.
#' https://doi.org/10.1198/000313001317097960
#'
#'
#' @importFrom stats aggregate sd qt
#'
#' @example examples/examples.afex_plot.R
#'
#' @export
afex_plot <- function(object, ...) {
  ## S3 generic: dispatch on the class of `object`
  ## (afex_aov, mixed, merMod, or the default method).
  UseMethod("afex_plot", object)
}
# @method afex_plot afex_aov
#' @rdname afex_plot
#' @export
afex_plot.afex_aov <- function(object,
                               x,
                               trace,
                               panel,
                               mapping,
                               error = "model",
                               error_ci = TRUE,
                               error_level = 0.95,
                               error_arg = list(width = 0),
                               data_plot = TRUE,
                               data_geom,
                               data_alpha = 0.5,
                               data_arg = list(color = "darkgrey"),
                               point_arg = list(),
                               line_arg = list(),
                               emmeans_arg = list(),
                               dodge = 0.5,
                               return = "plot",
                               factor_levels = list(),
                               legend_title,
                               ...) {
  ## afex_plot() method for afex ANOVA objects (class "afex_aov"):
  ## computes estimated marginal means via emmeans, attaches error bars
  ## according to `error`, and delegates plot assembly to
  ## afex_plot_internal().
  ## Validate enumerated arguments early; match.arg() errors on anything
  ## outside the allowed sets.
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## This method uses no further arguments; surface stray ones as a
  ## warning rather than aborting.
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## Resolve x/trace/panel specifications to character vectors of factor names.
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## Estimated marginal means for all plotted cells (with model-based CIs
  ## at `error_level`).
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  ## prepare raw (i.e., participant by cell) data
  data <- prep_data(object$data$long,
                    x = x,
                    trace = trace,
                    panel = panel,
                    factor_levels = factor_levels,
                    dv_col = attr(object, "dv"),
                    id = attr(object, "id"))
  ### prepare variables for SE/CI calculation
  ## Classify plotted factors using the design info stored on the afex_aov
  ## object (its "within"/"between" attributes).
  within_vars <- all_vars[all_vars %in% names(attr(object, "within"))]
  between_vars <- all_vars[all_vars %in% names(attr(object, "between"))]
  ### check if error bars are consistent with panel(s) and warn otherwise
  ## Three misleading combinations are flagged:
  ## (1) purely within-subjects panels with between-type error bars,
  ## (2) purely between-subjects panels with within-type error bars,
  ## (3) mixed designs, where no single error bar supports all comparisons.
  if (error %in% c("model", "mean", "between") &&
      all(c(x, trace) %in% within_vars)) {
    warning("Panel(s) show within-subjects factors, ",
            "but not within-subjects error bars.\n",
            'For within-subjects error bars use: error = "within"',
            call. = FALSE)
  } else if (error %in% c("within", "CMO") &&
             all(c(x, trace) %in% between_vars)) {
    warning("Panel(s) show between-subjects factors, ",
            "but within-subjects error bars.\n",
            'For between-subjects error bars use e.g.,: ',
            'error = "model" or error = "mean"',
            call. = FALSE)
  } else if (any(between_vars %in% c(x, trace)) &&
             any(within_vars %in% c(x, trace)) &&
             error != "none") {
    warning("Panel(s) show a mixed within-between-design.\n",
            "Error bars do not allow comparisons across all means.\n",
            'Suppress error bars with: error = "none"',
            call. = FALSE)
  }
  ## Replace/augment the intervals on the means according to `error`;
  ## `error_plot` is the flag passed on to the plotting backend.
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = attr(object, "id"), ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## Assemble the ggplot, or return the plotting data when return = "data".
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
                            ))
}
# @method afex_plot afex_aov
#' @rdname afex_plot
#' @export
afex_plot.mixed <- function(object,
                            x,
                            trace,
                            panel,
                            mapping,
                            id,
                            error = "model",
                            error_ci = TRUE,
                            error_level = 0.95,
                            error_arg = list(width = 0),
                            data_plot = TRUE,
                            data_geom,
                            data_alpha = 0.5,
                            data_arg = list(color = "darkgrey"),
                            point_arg = list(),
                            line_arg = list(),
                            emmeans_arg = list(),
                            dodge = 0.5,
                            return = "plot",
                            factor_levels = list(),
                            legend_title,
                            ...) {
  ## afex_plot() method for objects of class "mixed" (afex::mixed).
  ## The raw data are aggregated over the random-effects grouping factor(s)
  ## in `id` before plotting.
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## Warn about (and ignore) any unused extra arguments.
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## Resolve x/trace/panel specifications to character vectors of factor names.
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## Default `id`: all random-effects grouping factors of the fitted model.
  if (missing(id)) {
    id <- unique(names(lme4::ranef(object$full_model)))
    message("Aggregating data over: ", paste(id, collapse = ", "))
  }
  ## prepare raw (i.e., participant by cell) data
  ## (dv column name = LHS of the model formula)
  data <- prep_data(object$data,
                    x = x,
                    trace = trace,
                    panel = panel,
                    factor_levels = factor_levels,
                    dv_col = deparse(object$full_model@call[["formula"]][[2]]),
                    id = id)
  ## Single grouping column combining all id factors.
  data$afex_id <- interaction(data[id], sep = ".")
  ## Data-based error bars are only defined for a single grouping factor.
  if (!(error %in% c("none" ,"model", "mean")) &
      (length(id) > 1)) {
    stop("When aggregating over multiple random effects,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## Estimated marginal means for all plotted cells.
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  ## LHS of the model formula, later used as y-axis label.
  attr(emms, "dv") <- deparse(object$full_model@call[["formula"]][[2]])
  ## Within-subjects factors (relative to `id`) are those appearing in a
  ## random-effects term that is grouped by `id` (i.e., factors with
  ## random slopes); everything else is treated as between-subjects.
  if (length(id) == 1) {
    all_within <- lapply(lme4::findbars(object$call), all.vars)
    all_within <-
      unique(unlist(
        all_within[vapply(all_within, function(x) id %in% x, NA)]
      ))
    all_within <- all_within[all_within != id]
    within_vars <- all_vars[all_vars %in% all_within]
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ## NOTE(review): when length(id) > 1, within_vars/between_vars are never
  ## assigned and are passed below as missing arguments; presumably
  ## get_data_based_cis() does not evaluate them for the error types allowed
  ## in that case ("model", "mean", "none") -- confirm.
  ### prepare variables for SE/CI calculation
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## Assemble the ggplot, or return the plotting data when return = "data".
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
                            ))
}
#' @rdname afex_plot
#' @export
afex_plot.merMod <- function(object,
                             x,
                             trace,
                             panel,
                             mapping,
                             id,
                             error = "model",
                             error_ci = TRUE,
                             error_level = 0.95,
                             error_arg = list(width = 0),
                             data_plot = TRUE,
                             data_geom,
                             data_alpha = 0.5,
                             data_arg = list(color = "darkgrey"),
                             point_arg = list(),
                             line_arg = list(),
                             emmeans_arg = list(),
                             dodge = 0.5,
                             return = "plot",
                             factor_levels = list(),
                             legend_title,
                             ...) {
  ## afex_plot() method for lme4 models (class "merMod"). Mirrors
  ## afex_plot.mixed(), but accesses the model directly (S4 slot `@call`)
  ## and recovers the model frame via emmeans::recover_data().
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## Warn about (and ignore) any unused extra arguments.
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## Resolve x/trace/panel specifications to character vectors of factor names.
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## Default `id`: all random-effects grouping factors of the fitted model.
  if (missing(id)) {
    id <- unique(names(lme4::ranef(object)))
    message("Aggregating data over: ", paste(id, collapse = ", "))
  }
  ## prepare raw (i.e., participant by cell) data
  ## (fixed.only = FALSE so random-effects variables are recovered, too)
  data <- prep_data(
    data = emmeans::recover_data(
      object = object,
      trms = terms(object, fixed.only = FALSE)
    ),
    x = x,
    trace = trace,
    panel = panel,
    factor_levels = factor_levels,
    dv_col = deparse(object@call[["formula"]][[2]]),
    id = id)
  ## Single grouping column combining all id factors.
  data$afex_id <- interaction(data[id], sep = ".")
  ## Data-based error bars are only defined for a single grouping factor.
  if (!(error %in% c("none" ,"model", "mean")) &
      (length(id) > 1)) {
    stop("When aggregating over multiple random effects,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## Estimated marginal means for all plotted cells.
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  ## LHS of the model formula, later used as y-axis label.
  attr(emms, "dv") <- deparse(object@call[["formula"]][[2]])
  ## Within-subjects factors (relative to `id`) are those appearing in a
  ## random-effects term that is grouped by `id` (i.e., factors with
  ## random slopes); everything else is treated as between-subjects.
  if (length(id) == 1) {
    all_within <- lapply(lme4::findbars(object@call), all.vars)
    all_within <-
      unique(unlist(
        all_within[vapply(all_within, function(x) id %in% x, NA)]
      ))
    all_within <- all_within[all_within != id]
    within_vars <- all_vars[all_vars %in% all_within]
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ## NOTE(review): when length(id) > 1, within_vars/between_vars remain
  ## missing arguments below; presumably unevaluated for the error types
  ## allowed in that case ("model", "mean", "none") -- confirm.
  ### prepare variables for SE/CI calculation
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## Assemble the ggplot, or return the plotting data when return = "data".
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
                            ))
}
#' @rdname afex_plot
#' @export
afex_plot.default <- function(object,
                              x,
                              trace,
                              panel,
                              mapping,
                              id,
                              dv,
                              data,
                              within_vars,
                              between_vars,
                              error = "model",
                              error_ci = TRUE,
                              error_level = 0.95,
                              error_arg = list(width = 0),
                              data_plot = TRUE,
                              data_geom,
                              data_alpha = 0.5,
                              data_arg = list(color = "darkgrey"),
                              point_arg = list(),
                              line_arg = list(),
                              emmeans_arg = list(),
                              dodge = 0.5,
                              return = "plot",
                              factor_levels = list(),
                              legend_title,
                              ...) {
  ## Fallback afex_plot() method for arbitrary model objects supported by
  ## emmeans. `dv`, `data`, and `id` are guessed from the model call when
  ## missing; `within_vars`/`between_vars` let the caller declare the design
  ## (supplying one of the two derives the other as its complement).
  return <- match.arg(return, c("plot", "data"))
  error <- match.arg(error, c("none",
                              "model",
                              "mean",
                              "within", "CMO",
                              "between"))
  ## Warn about (and ignore) any unused extra arguments.
  dots <- list(...)
  if (length(dots) > 0) {
    warning("Additional arguments ignored: ",
            paste(names(dots), collapse = ", "), call. = FALSE)
  }
  ## Resolve x/trace/panel specifications to character vectors of factor names.
  x <- get_plot_var(x)
  trace <- get_plot_var(trace)
  panel <- get_plot_var(panel)
  all_vars <- c(x, trace, panel)
  ## Guess the dependent variable from the LHS of the model formula.
  if (missing(dv)) {
    formula_name <- names(object$call)[2]
    message("dv column detected: ", deparse(object$call[[formula_name]][[2]]))
    dv <- deparse(object$call[[formula_name]][[2]])
  }
  ## prepare raw (i.e., participant by cell) data if missing
  if (missing(data)) {
    data <- emmeans::recover_data(
      object = object,
      trms = terms(object)
    )
  }
  ## Without an id column, treat every row as an independent observation.
  if (missing(id)) {
    message("No id column passed. ",
            "Assuming all rows are independent samples.")
    data$id <- factor(seq_len(nrow(data)))
    id <- "id"
  }
  data <- prep_data(
    data = data,
    x = x,
    trace = trace,
    panel = panel,
    factor_levels = factor_levels,
    dv_col = dv,
    id = id)
  ## Single grouping column combining all id factors.
  data$afex_id <- interaction(data[id], sep = ".")
  ## Data-based error bars are only defined for a single grouping factor.
  if (!(error %in% c("none" ,"model", "mean")) &&
      (length(id) > 1)) {
    stop("When aggregating over multiple ids,\n",
         ' error has to be in: c("model", "mean", "none")',
         call. = FALSE)
  }
  ## Estimated marginal means for all plotted cells.
  emms <- get_emms(object = object,
                   x = x,
                   trace = trace,
                   panel = panel,
                   emmeans_arg = emmeans_arg,
                   factor_levels = factor_levels,
                   level = error_level)
  attr(emms, "dv") <- dv
  ## If only one of within_vars/between_vars was supplied, derive the other
  ## as its complement within the plotted factors.
  ## (Bug fix: each set used to be computed from the still-missing argument
  ## itself, which raised "argument ... is missing" whenever only one of
  ## the two was given.)
  if (missing(within_vars) && !missing(between_vars)) {
    within_vars <- all_vars[!(all_vars %in% between_vars)]
  }
  if (!missing(within_vars) && missing(between_vars)) {
    between_vars <- all_vars[!(all_vars %in% within_vars)]
  }
  ### prepare variables for SE/CI calculation
  tmp <- get_data_based_cis(emms = emms,
                            data = data,
                            error = error,
                            id = "afex_id", ## colname holding the id/grouping variable
                            all_vars = all_vars,
                            within_vars = within_vars,
                            between_vars = between_vars,
                            error_level = error_level,
                            error_ci = error_ci)
  emms <- tmp$emms
  error_plot <- tmp$error_plot
  ## Assemble the ggplot, or return the plotting data when return = "data".
  return(afex_plot_internal(x = x,
                            trace = trace,
                            panel = panel,
                            means = emms,
                            data = data,
                            error_plot = error_plot,
                            error_arg = error_arg,
                            dodge = dodge,
                            data_plot = data_plot,
                            data_geom = data_geom,
                            data_alpha = data_alpha,
                            data_arg = data_arg,
                            point_arg = point_arg,
                            line_arg = line_arg,
                            mapping = mapping,
                            legend_title = legend_title,
                            return = return
                            ))
}
###if(getRversion() >= "2.15.1") utils::globalVariables(c("error", "y", "x"))
#' @rdname afex_plot
#' @export
interaction_plot <- function(means,
                             data,
                             mapping = c("shape", "linetype"),
                             error_plot = TRUE,
                             error_arg = list(width = 0),
                             data_plot = TRUE,
                             data_geom = ggplot2::geom_point,
                             data_alpha = 0.5,
                             data_arg = list(color = "darkgrey"),
                             point_arg = list(),
                             line_arg = list(),
                             dodge = 0.5,
                             legend_title,
                             col_x = "x",
                             col_y = "y",
                             col_trace = "trace",
                             col_panel = "panel",
                             col_lower = "lower",
                             col_upper = "upper") {
  ## Plotting backend for two-factor (x by trace) panels: draws cell means
  ## (`means`) with one line per trace level, optional raw data (`data`) in
  ## the background, and optional error bars from the lower/upper columns.
  ## Column names are configurable via the col_* arguments.
  ## ggplot2 is only suggested, so check availability at run time.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("package ggplot2 is required.", call. = FALSE)
  }
  ## Default mapping: distinguish trace levels via shape and linetype.
  ## (Also fixes the "lineytpe" typo formerly in the formal default; the
  ## effective default was always set here.)
  if (missing(mapping)) {
    mapping <- c('shape', 'linetype')
  } else if (length(mapping) == 0) {
    stop("mapping cannot be empty. Possible values: 'shape', 'color', 'linetype'.",
         call. = FALSE)
  }
  ## Map each requested aesthetic onto the trace column.
  tmp_list <- as.list(rep(col_trace, length(mapping)))
  names(tmp_list) <- mapping
  ## Error bars cannot use linetype/shape/fill; keep only the rest (e.g. color).
  error_mapping <- mapping[!(mapping %in% c("linetype", "shape", "fill"))]
  tmp_list_error <- as.list(rep(col_trace, length(error_mapping)))
  names(tmp_list_error) <- error_mapping
  plot_out <- ggplot2::ggplot(data = means,
                              mapping = do.call(
                                what = ggplot2::aes_string,
                                args = c(list(
                                  y = col_y,
                                  x = col_x,
                                  group = col_trace),
                                  tmp_list)))
  ## Optionally show the raw data behind the means.
  if (data_plot) {
    if (missing(data_geom)) {
      data_geom <- ggplot2::geom_point
    }
    data_arg$alpha <- data_alpha
    ## Dodge the raw data like the means, but only when the geom supports a
    ## position argument and the caller has not set one.
    if (!("position" %in% names(data_arg)) &&
        ("position" %in% names(formals(data_geom)))) {
      data_arg$position <- ggplot2::position_dodge(width = dodge)
    }
    plot_out <- plot_out +
      do.call(what = data_geom,
              args = c(
                mapping =
                  list(
                    ggplot2::aes_string(
                      group =
                        paste0("interaction(",
                               paste0(c(col_x, col_trace), collapse = ", "),
                               ")")
                    )),
                data = list(data),
                data_arg
              )
      )
  }
  ## Draw means, lines, and error bars separately per trace level; the other
  ## levels are set to NA (instead of dropped) so dodge positions stay
  ## aligned across the layers.
  ## (Bug fix: these lookups now honor col_trace/col_panel instead of the
  ## hardcoded "trace"/"panel" column names; defaults are unchanged.)
  for (i in levels(data[[col_trace]])) {
    tmp_means <- means
    tmp_means[means[[col_trace]] != i, c(col_y, col_lower, col_upper)] <- NA
    plot_out <- plot_out +
      do.call(what = ggplot2::geom_point,
              args = c(
                data = list(tmp_means),
                position = list(
                  ggplot2::position_dodge(width = dodge)
                ),
                point_arg,
                na.rm = list(TRUE)
              )) +
      do.call(what = ggplot2::geom_line,
              args = c(
                data = list(tmp_means),
                position = list(
                  ggplot2::position_dodge(width = dodge)
                ),
                line_arg,
                na.rm = list(TRUE)
              ))
    if (error_plot) {
      plot_out <- plot_out +
        do.call(what = ggplot2::geom_errorbar,
                args = c(
                  data = list(tmp_means),
                  mapping = list(do.call(
                    what = ggplot2::aes_string,
                    args = c(list(
                      x = col_x,
                      ymin = col_lower,
                      ymax = col_upper,
                      group = col_trace),
                      tmp_list_error))),
                  position = list(ggplot2::position_dodge(width = dodge)),
                  error_arg,
                  na.rm = list(TRUE),
                  inherit.aes = list(FALSE)
                ))
    }
  }
  ## Facet only when there is more than one panel.
  if (length(unique(means[[col_panel]])) > 1) {
    plot_out <- plot_out +
      ggplot2::facet_wrap(facets = col_panel)
  }
  ## Axis labels come from attributes attached by the calling afex_plot() method.
  if (!is.null(attr(means, "dv"))) {
    plot_out <- plot_out +
      ggplot2::ylab(attr(means, "dv"))
  }
  if (!is.null(attr(means, "x"))) {
    plot_out <- plot_out +
      ggplot2::xlab(attr(means, "x"))
  }
  ## One shared legend title for all aesthetics mapped to the trace factor.
  if (!missing(legend_title)) {
    legend_title <- paste(legend_title, collapse = "\n")
    tmp_list <- rep(list(ggplot2::guide_legend(title = legend_title)),
                    length(mapping))
    names(tmp_list) <- mapping
    plot_out <- plot_out +
      do.call(what = ggplot2::guides,
              args = tmp_list)
  }
  return(plot_out)
}
#' @rdname afex_plot
#' @export
oneway_plot <- function(means,
                        data,
                        mapping = "",
                        error_plot = TRUE,
                        error_arg = list(width = 0),
                        data_plot = TRUE,
                        data_geom = ggbeeswarm::geom_beeswarm,
                        data_alpha = 0.5,
                        data_arg = list(color = "darkgrey"),
                        point_arg = list(),
                        legend_title,
                        col_x = "x",
                        col_y = "y",
                        col_panel = "panel",
                        col_lower = "lower",
                        col_upper = "upper") {
  ## Plotting backend for single-factor panels (no trace factor): cell means
  ## on top of optional raw data (beeswarm by default) plus optional error
  ## bars from the lower/upper columns. Column names are configurable via
  ## the col_* arguments.
  ## ggplot2 is only suggested, so check availability at run time.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("package ggplot2 is required.", call. = FALSE)
  }
  ## Default: no additional aesthetics ("" = none).
  if (missing(mapping)) {
    mapping <- ""
  }
  ## If aesthetics are requested, map each of them onto the x column; error
  ## bars cannot use linetype/shape/fill, so keep only the rest (e.g. color).
  if (length(mapping) > 1 || mapping[1] != "") {
    tmp_list <- as.list(rep(col_x, length(mapping)))
    names(tmp_list) <- mapping
    error_mapping <- mapping[!(mapping %in% c("linetype", "shape", "fill"))]
    tmp_list_error <- as.list(rep(col_x, length(error_mapping)))
    names(tmp_list_error) <- error_mapping
  } else {
    tmp_list <- list()
    tmp_list_error <- list()
  }
  plot_out <- ggplot2::ggplot(data = means,
                              mapping = do.call(
                                what = ggplot2::aes_string,
                                args = c(list(
                                  y = col_y,
                                  x = col_x,
                                  group = col_x),
                                  tmp_list)))
  ## Optionally show the raw data behind the means; the default geom needs
  ## the suggested ggbeeswarm package.
  if (data_plot) {
    if (missing(data_geom)) {
      if (!requireNamespace("ggbeeswarm", quietly = TRUE)) {
        stop("package ggbeeswarm is required.", call. = FALSE)
      }
      data_geom <- ggbeeswarm::geom_beeswarm
    }
    data_arg$alpha <- data_alpha
    plot_out <- plot_out +
      do.call(what = data_geom,
              args = c(
                data = list(data),
                data_arg
              )
      )
  }
  ## The cell means themselves.
  plot_out <- plot_out +
    do.call(what = ggplot2::geom_point,
            args = point_arg)
  if (error_plot) {
    plot_out <- plot_out +
      do.call(what = ggplot2::geom_errorbar,
              args = c(
                mapping = list(do.call(
                  what = ggplot2::aes_string,
                  args = c(list(
                    x = col_x,
                    ymin = col_lower,
                    ymax = col_upper),
                    tmp_list_error))),
                error_arg,
                inherit.aes = list(FALSE)
              ))
  }
  ## Facet only when there is more than one panel.
  ## NOTE(review): uses means$panel and facets = "panel" rather than
  ## col_panel; correct for the default column name -- confirm whether a
  ## non-default col_panel should be honored here.
  if (length(unique(means$panel)) > 1) {
    plot_out <- plot_out +
      ggplot2::facet_wrap(facets = "panel")
  }
  ## add labels
  ## Axis labels come from attributes attached by the calling afex_plot() method.
  if (!is.null(attr(means, "dv"))) {
    plot_out <- plot_out +
      ggplot2::ylab(attr(means, "dv"))
  }
  if (!is.null(attr(means, "x"))) {
    plot_out <- plot_out +
      ggplot2::xlab(attr(means, "x"))
  }
  ## One shared legend title for all requested aesthetics.
  if (!missing(legend_title)) {
    legend_title <- paste(legend_title, collapse = "\n")
    tmp_list <- rep(list(ggplot2::guide_legend(title = legend_title)),
                    length(mapping))
    names(tmp_list) <- mapping
    plot_out <- plot_out +
      do.call(what = ggplot2::guides,
              args = tmp_list)
  }
  return(plot_out)
}
|
\name{Menu.facTab1}
\alias{Menu.facTab1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Basic information for full factorial designs}
\description{This help file describes usage of the basic information tab of
the full factorial design menu}
\section{Brief statistical background}{
Full factorial designs consist of all possible combinations of factor levels,
i.e. the number of runs is the product of all numbers of factor levels,
for example 24 for an experiment with two 2-level factors and one 6-level
factor. Of course, their size grows fast with the number of factors. If
a full factorial design is not feasible, orthogonal main effects designs
or (manually-generated) combinations of smaller designs may be a reasonable
option.}
\section{Inputs on Tab Base Settings}{
\describe{
\item{name of design}{must be a valid name. The design itself is created under
this name in the R workspace. }
\item{number of runs}{is a consequence of the specifications on the Factor Details tab.
It is displayed for information purposes only; its value is only valid if
the Factor Details tab contains entries for all factors.}
\item{number of factors}{must always be specified.
The number of factors must match the number of entries on the Factor Details tab.}
\item{replications}{is the number of times each experimental run is conducted.
If larger than 1, each run is conducted several times. If the checkbox next
to the number of replications is checked, it is assumed that the experiment
involves repeated measurements for one setup of the experimental run; if it
is not checked, the experimental run itself is replicated with everything
relevant newly set up (much more valuable than repeated measurements, unless
the key driver of variability is in the measuring step). If the check box is
not checked, the experiment will be randomized separately for each round of
replications (first all first runs, then all second runs etc.).}
\item{number of blocks}{is the number of equally-sized blocks of homogeneous
units into which the overall number of runs is to be subdivided.
Note that the number of blocks must be compatible with the numbers of
levels of the experiment: it must be the product of one or more primes
that the numbers of levels factor into. For example, \cr
   a design with three factors at 2, 5 and 5 levels can have
   5 blocks of 10 units each, no other blocking is possible for this design
because it would confound blocks with factor main effects;\cr
a design with three factors at 2, 6 and 6 levels can have 2, 3, 6, 4 or 12 blocks,
because all these numbers of blocks can be obtained from the three 2s and two 3s
the numbers of levels factor into without confounding a main effect.\cr
An error message will be given whenever an impossible number of
blocks or a number of blocks that would require aliasing of blocks with
main effects is used; the design is generated with a warning message
whenever the block factor is aliased
with any two-factor interaction among the design factors. }
\item{randomization settings}{should normally not be changed; you can provide a
seed if you want to exactly reproduce a randomized design created in the past.
Unchecking the randomization box will produce a non-randomized experiment.
This is usually NOT recommended.}
}
}
\author{ Ulrike Groemping }
\seealso{ See Also \code{\link[DoE.base]{fac.design}} for the function that does the calculations
and \code{\link{Menu.General}} for overall help on the general factorial design menu.}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ array }% __ONLY ONE__ keyword per line
| /man/Menu.facTab1.Rd | no_license | cran/RcmdrPlugin.DoE | R | false | false | 3,926 | rd | \name{Menu.facTab1}
\alias{Menu.facTab1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Basic information for full factorial designs}
\description{This help file describes usage of the basic information tab of
the full factorial design menu}
\section{Brief statistical background}{
Full factorial designs consist of all possible combinations of factor levels,
i.e. the number of runs is the product of all numbers of factor levels,
for example 24 for an experiment with two 2-level factors and one 6-level
factor. Of course, their size grows fast with the number of factors. If
a full factorial design is not feasible, orthogonal main effects designs
or (manually-generated) combinations of smaller designs may be a reasonable
option.}
\section{Inputs on Tab Base Settings}{
\describe{
\item{name of design}{must be a valid name. The design itself is created under
this name in the R workspace. }
\item{number of runs}{is a consequence of the specifications on the Factor Details tab.
It is displayed for information purposes only; its value is only valid if
the Factor Details tab contains entries for all factors.}
\item{number of factors}{must always be specified.
The number of factors must match the number of entries on the Factor Details tab.}
\item{replications}{is the number of times each experimental run is conducted.
If larger than 1, each run is conducted several times. If the checkbox next
to the number of replications is checked, it is assumed that the experiment
involves repeated measurements for one setup of the experimental run; if it
is not checked, the experimental run itself is replicated with everything
relevant newly set up (much more valuable than repeated measurements, unless
the key driver of variability is in the measuring step). If the check box is
not checked, the experiment will be randomized separately for each round of
replications (first all first runs, then all second runs etc.).}
\item{number of blocks}{is the number of equally-sized blocks of homogeneous
units into which the overall number of runs is to be subdivided.
Note that the number of blocks must be compatible with the numbers of
levels of the experiment: it must be the product of one or more primes
that the numbers of levels factor into. For example, \cr
   a design with three factors at 2, 5 and 5 levels can have
   5 blocks of 10 units each, no other blocking is possible for this design
because it would confound blocks with factor main effects;\cr
a design with three factors at 2, 6 and 6 levels can have 2, 3, 6, 4 or 12 blocks,
because all these numbers of blocks can be obtained from the three 2s and two 3s
the numbers of levels factor into without confounding a main effect.\cr
An error message will be given whenever an impossible number of
blocks or a number of blocks that would require aliasing of blocks with
main effects is used; the design is generated with a warning message
whenever the block factor is aliased
with any two-factor interaction among the design factors. }
\item{randomization settings}{should normally not be changed; you can provide a
seed if you want to exactly reproduce a randomized design created in the past.
Unchecking the randomization box will produce a non-randomized experiment.
This is usually NOT recommended.}
}
}
\author{ Ulrike Groemping }
\seealso{ See Also \code{\link[DoE.base]{fac.design}} for the function that does the calculations
and \code{\link{Menu.General}} for overall help on the general factorial design menu.}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ array }% __ONLY ONE__ keyword per line
|
# Auto-extracted example script for cancensus::as_census_region_list().
# The example body is wrapped in "Not run" markers ("## Not run:" / "##D"),
# so only library(cancensus) executes when this file is sourced; the API
# query below is illustrative.
library(cancensus)
### Name: as_census_region_list
### Title: Convert a (suitably filtered) data frame from
###   'list_census_regions' to a list suitable for passing to 'get_census'.
### Aliases: as_census_region_list
### ** Examples
## Not run:
##D library(dplyr, warn.conflicts = FALSE)
##D
##D # Query the CensusMapper API for the total occupied dwellings
##D # of 20 random Census Subdivisions, in Census 2016.
##D regions <- list_census_regions("CA16") %>%
##D   filter(level == "CSD") %>%
##D   sample_n(20) %>%
##D   as_census_region_list()
##D
##D occupied <- get_census("CA16", regions = regions,
##D                        vectors = c("v_CA16_408"),
##D                        level = "Regions")
## End(Not run)
| /data/genthat_extracted_code/cancensus/examples/as_census_region_list.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 743 | r | library(cancensus)
# Auto-extracted example for cancensus::as_census_region_list(); every line
# below is either a metadata comment or wrapped in "Not run" markers
# ("## Not run:" / "##D"), so nothing in this block executes.
### Name: as_census_region_list
### Title: Convert a (suitably filtered) data frame from
###   'list_census_regions' to a list suitable for passing to 'get_census'.
### Aliases: as_census_region_list
### ** Examples
## Not run:
##D library(dplyr, warn.conflicts = FALSE)
##D
##D # Query the CensusMapper API for the total occupied dwellings
##D # of 20 random Census Subdivisions, in Census 2016.
##D regions <- list_census_regions("CA16") %>%
##D   filter(level == "CSD") %>%
##D   sample_n(20) %>%
##D   as_census_region_list()
##D
##D occupied <- get_census("CA16", regions = regions,
##D                        vectors = c("v_CA16_408"),
##D                        level = "Regions")
## End(Not run)
|
## Exploratory Data Analysis, plot 1: histogram of household Global Active
## Power for 2007-02-01 and 2007-02-02.
power <- read.table("F:/R/course.Exploratory Data Analysis/household_power_consumption.txt", sep=";", header = TRUE)
## Parse the day/month/year date strings into Date objects.
power$Date <- as.Date(as.character(power$Date),"%d/%m/%Y")
## Restrict to the two target days.
feb_days <- power[power$Date >= "2007-02-01" & power$Date <= "2007-02-02",]
## Coerce the measurement columns (3-9) to numeric via character.
feb_days[,3:9] <- sapply(feb_days[,3:9], function(v) as.numeric(as.character(v)))
## Render the histogram straight to a PNG device.
png(file="F:/R/course.Exploratory Data Analysis/Plot/plot1.png",bg="white")
hist(feb_days$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
## Reset the plotting layout.
par(mfcol=c(1,1))
| /plot1.R | no_license | noahmw/ExData_Plotting1 | R | false | false | 536 | r | df <- read.table("F:/R/course.Exploratory Data Analysis/household_power_consumption.txt", sep=";", header = TRUE)
# Continuation of plot1.R: filter two days of household power data and write
# a histogram of Global Active Power to a PNG file.
df$Date <- as.Date(as.character(df$Date),"%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02.
df.new <- df[df$Date >= "2007-02-01"& df$Date <= "2007-02-02",]
# Coerce measurement columns 3-9 to numeric via character.
df.new[,3:9] <- sapply(df.new[,3:9], function(x) as.numeric(as.character(x)))
png(file="F:/R/course.Exploratory Data Analysis/Plot/plot1.png",bg="white")
hist(df.new$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
# Reset the plotting layout.
par(mfcol=c(1,1))
|
# Regression on mlbench's BostonHousing data: fit and plot a neuralnet MLP,
# then train and evaluate a keras dense network on a ~70/30 random split
# with training-based feature standardisation.
library(reticulate)
library(keras)
library(mlbench)
library(neuralnet)
library(magrittr)
library(dplyr)
library(tensorflow)
# use_condaenv("r-reticulate")
# Load the data; columns 1-13 are predictors, column 14 is the target (medv).
data("BostonHousing")
data <- BostonHousing
str(data)
# Convert factor columns to numeric so the data frame can later be turned
# into a plain numeric matrix.
data %<>% mutate_if(is.factor,as.numeric)
str(data)
# neuralnet MLP with two hidden layers (10 and 5 units), mainly for the
# network plot below.
# NOTE(review): linear.output = F applies the activation to the output node;
# for a numeric target such as medv, linear.output = TRUE is the usual
# choice -- confirm this is intended.
n <- neuralnet(medv ~ crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+b+lstat,
               data = data,
               hidden = c(10,5),
               linear.output = F,
               lifesign = 'full',
               rep=1)
plot(n,
     col.hidden = 'darkgreen',
     col.hidden.synapse = 'darkgreen',
     show.weights = F,
     information = F,
     fill = 'lightblue')
# Strip to an unnamed numeric matrix for positional row/column indexing.
data<-as.matrix(data)
dimnames(data) <- NULL
# Random ~70/30 train/test partition (labels 1/2 drawn per row).
set.seed(1234)
ind <- sample(2, nrow(data), replace = T, prob = c(0.70,0.30))
train <- data[ind==1,1:13]
test <- data[ind==2,1:13]
traintarget <- data[ind==1,14]
testtarget <- data[ind==2,14]
# Standardise: z-score both sets with the TRAINING means and SDs, so no
# test-set statistics leak into preprocessing.
# Normalised val = (Value - mean)/standard deviation
m <- colMeans(train)
s <- apply(train, 2, sd)
train <- scale(train, center = m, scale = s)
test <- scale(test, center = m, scale = s)
# keras model: 13 -> 50 -> 40 -> 1 dense network, ReLU hidden activations.
model <- keras_model_sequential()
model %>%
  layer_dense(units = 50, activation = 'relu', input_shape= c(13)) %>%
  layer_dense(units = 40, activation = 'relu') %>%
  layer_dense(units = 1)
# Mean-squared-error loss with RMSprop; track mean absolute error.
model %>% compile(loss = 'mse',
                  optimizer = 'rmsprop',
                  metrics = 'mae')
# Fit for 150 epochs, holding out 20% of the training data for validation.
mymodel <- model %>%
  fit(train,
      traintarget,
      epochs = 150,
      batch_size = 32,
      validation_split = 0.2)
# Evaluate on the held-out test set, report test MSE, and plot predicted
# against observed values.
model %>% evaluate(test, testtarget)
pred <- model %>% predict(test)
mean((testtarget-pred)^2)
plot(testtarget,pred)
| /NeuralnetDL02.R | no_license | RitBh123/RWork | R | false | false | 1,823 | r | library(reticulate)
# Regression on mlbench's BostonHousing data: fit and plot a neuralnet MLP,
# then train and evaluate a keras dense network on a ~70/30 random split
# with training-based feature standardisation (duplicate copy of the
# script body, minus the first library() line).
library(keras)
library(mlbench)
library(neuralnet)
library(magrittr)
library(dplyr)
library(tensorflow)
# use_condaenv("r-reticulate")
#taking data
data("BostonHousing")
data <- BostonHousing
str(data)
data %<>% mutate_if(is.factor,as.numeric)
str(data)
#NeuralNetModel
n <- neuralnet(medv ~ crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+b+lstat,
               data = data,
               hidden = c(10,5),
               linear.output = F,
               lifesign = 'full',
               rep=1)
plot(n,
     col.hidden = 'darkgreen',
     col.hidden.synapse = 'darkgreen',
     show.weights = F,
     information = F,
     fill = 'lightblue')
# Strip to an unnamed numeric matrix for positional indexing.
data<-as.matrix(data)
dimnames(data) <- NULL
#Partition
set.seed(1234)
ind <- sample(2, nrow(data), replace = T, prob = c(0.70,0.30))
train <- data[ind==1,1:13]
test <- data[ind==2,1:13]
traintarget <- data[ind==1,14]
testtarget <- data[ind==2,14]
#Normalisation
#Normalised val = (Value - mean)/standard deviation
# (both sets use the TRAINING means/SDs, avoiding test-set leakage)
m <- colMeans(train)
s <- apply(train, 2, sd)
train <- scale(train, center = m, scale = s)
test <- scale(test, center = m, scale = s)
#Create model
model <- keras_model_sequential()
model %>%
  layer_dense(units = 50, activation = 'relu', input_shape= c(13)) %>%
  layer_dense(units = 40, activation = 'relu') %>%
  layer_dense(units = 1)
#model_compile
model %>% compile(loss = 'mse',
                  optimizer = 'rmsprop',
                  metrics = 'mae')
#fit model
mymodel <- model %>%
  fit(train,
      traintarget,
      epochs = 150,
      batch_size = 32,
      validation_split = 0.2)
#Evaluate
model %>% evaluate(test, testtarget)
pred <- model %>% predict(test)
mean((testtarget-pred)^2)
plot(testtarget,pred)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intercept.R
\name{step_intercept}
\alias{step_intercept}
\title{Add intercept (or constant) column}
\usage{
step_intercept(
recipe,
...,
role = "predictor",
trained = FALSE,
name = "intercept",
value = 1,
skip = FALSE,
id = rand_id("intercept")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{Argument ignored; included for consistency with other step
specification functions.}
\item{role}{For model terms created by this step, what analysis
role should they be assigned? By default, the function assumes
that the new columns created from the original variables will be
used as predictors in a model.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated. Again included for consistency.}
\item{name}{Character name for newly added column}
\item{value}{A numeric constant to fill the intercept column. Defaults to 1.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the
new step added to the sequence of existing steps (if any).
}
\description{
\code{step_intercept} creates a \emph{specification} of a recipe step that
will add an intercept or constant term in the first column of a data
matrix. \code{step_intercept} has defaults to \emph{predictor} role so
that it is by default called in the bake step. Be careful to avoid
unintentional transformations when calling steps with
\code{all_predictors}.
}
\examples{
library(modeldata)
data(biomass)
biomass_tr <- biomass[biomass$dataset == "Training",]
biomass_te <- biomass[biomass$dataset == "Testing",]
rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr)
rec_trans <- recipe(HHV ~ ., data = biomass_tr[, -(1:2)]) \%>\%
step_intercept(value = 2) \%>\%
step_scale(carbon)
rec_obj <- prep(rec_trans, training = biomass_tr)
with_intercept <- bake(rec_obj, biomass_te)
with_intercept
}
\seealso{
\code{\link[=recipe]{recipe()}} \code{\link[=prep.recipe]{prep.recipe()}} \code{\link[=bake.recipe]{bake.recipe()}}
}
| /man/step_intercept.Rd | permissive | labouz/recipes | R | false | true | 2,643 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intercept.R
\name{step_intercept}
\alias{step_intercept}
\title{Add intercept (or constant) column}
\usage{
step_intercept(
recipe,
...,
role = "predictor",
trained = FALSE,
name = "intercept",
value = 1,
skip = FALSE,
id = rand_id("intercept")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{Argument ignored; included for consistency with other step
specification functions.}
\item{role}{For model terms created by this step, what analysis
role should they be assigned? By default, the function assumes
that the new columns created from the original variables will be
used as predictors in a model.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated. Again included for consistency.}
\item{name}{Character name for newly added column}
\item{value}{A numeric constant to fill the intercept column. Defaults to 1.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the
new step added to the sequence of existing steps (if any).
}
\description{
\code{step_intercept} creates a \emph{specification} of a recipe step that
will add an intercept or constant term in the first column of a data
matrix. \code{step_intercept} has defaults to \emph{predictor} role so
that it is by default called in the bake step. Be careful to avoid
unintentional transformations when calling steps with
\code{all_predictors}.
}
\examples{
library(modeldata)
data(biomass)
biomass_tr <- biomass[biomass$dataset == "Training",]
biomass_te <- biomass[biomass$dataset == "Testing",]
rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr)
rec_trans <- recipe(HHV ~ ., data = biomass_tr[, -(1:2)]) \%>\%
step_intercept(value = 2) \%>\%
step_scale(carbon)
rec_obj <- prep(rec_trans, training = biomass_tr)
with_intercept <- bake(rec_obj, biomass_te)
with_intercept
}
\seealso{
\code{\link[=recipe]{recipe()}} \code{\link[=prep.recipe]{prep.recipe()}} \code{\link[=bake.recipe]{bake.recipe()}}
}
|
## This program is used to cache the inverse of a Matrix to improve the performance. As we know that Matrix inversion
## is usually a costly computation.
## It creates a new Matrix that can cache its inverse
## Create a "cache matrix": a closure-based object wrapping a matrix `x`
## together with a memoised slot for its inverse.
##
## Returned interface (consumed by cacheSolve):
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # Bug fix: the cache variable is `inv`, not `m`. The original assigned
    # `m <<- NULL`, which left a stale inverse in the cache after set().
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function creates an inverse of the matrix created by the function makeCacheMatrix. If the inverse is already
## calculated, then it should return the cached inverse matrix, otherwise compute the
## Return the inverse of a cache-matrix object built by makeCacheMatrix().
## A previously cached inverse is returned directly; otherwise the inverse
## is computed with solve(), written back into the cache, and returned.
## Additional arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  message("calculating inverse")
  fresh
}
| /cachematrix.R | no_license | Arijit-Nath/ProgrammingAssignment2 | R | false | false | 1,073 | r | ## This program is used to cache the inverse of a Matrix to improve the performance. As we know that Matrix inversion
## is usually a costly computation.
## It creates a new Matrix that can cache its inverse
## Create a "cache matrix": a closure-based object wrapping a matrix `x`
## together with a memoised slot for its inverse.
##
## Returned interface (consumed by cacheSolve):
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # Bug fix: the cache variable is `inv`, not `m`. The original assigned
    # `m <<- NULL`, which left a stale inverse in the cache after set().
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function creates an inverse of the matrix created by the function makeCacheMatrix. If the inverse is already
## calculated, then it should return the cached inverse matrix, otherwise compute the
## Return the inverse of a cache-matrix object built by makeCacheMatrix().
## A previously cached inverse is returned directly; otherwise the inverse
## is computed with solve(), written back into the cache, and returned.
## Additional arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  message("calculating inverse")
  fresh
}
|
# load packages
library(tidyverse)
library(ggplot2)
library(ggtext)
library(tidytext)
library(ggpmthemes)
library(patchwork)
library(here)
#Questions:
#Differences between in-state and out-of-state tuition by state
#in-state and out-of-state tuition:
#  https://www.quora.com/What-are-in-state-and-out-of-state-tuition-fees
#which states have the largest differences
#Data: TidyTuesday 2020-03-10 tuition data plus a state -> census-region lookup
tuition_cost <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-03-10/tuition_cost.csv')
census_regions <- readr::read_csv('https://raw.githubusercontent.com/alakin4/Data/master/us_census_bureau_regions_and_divisions.csv')
#look at tuition per year of study since degrees have different lengths
# degree_length reads like "4 Year"; its first character gives the number
# of years used to annualise the in-/out-of-state totals.
tuition_cost <- tuition_cost%>%
  mutate(nr_degree_length = as.numeric(str_sub(degree_length,1,1)),
         yr_in_state_total = in_state_total/nr_degree_length,
         yr_out_of_state_total = out_of_state_total/nr_degree_length)%>%
  filter(!is.na(yr_out_of_state_total)| !is.na(yr_in_state_total))
# Per-state average annual cost for residents and non-residents, the
# out-minus-in difference, and its rank (1 = biggest gap); then long
# format (one row per state x tuition category) joined to the regions.
avg_state_tuition_cost<-tuition_cost%>%
  filter(!is.na(state))%>%
  group_by(state)%>%
  summarise(avg_yr_in_state_total = mean(yr_in_state_total),
            avg_yr_out_of_state_total = mean(yr_out_of_state_total))%>%
  mutate(diff_out_in_state = avg_yr_out_of_state_total-avg_yr_in_state_total)%>%
  arrange(desc(diff_out_in_state))%>%
  mutate(diff_rank = row_number())%>%
  pivot_longer(cols = -c(state, diff_out_in_state,diff_rank),
               names_to = "tuition_category",
               values_to = "avg_annual_tutition" )%>%
  left_join(census_regions, by = c("state" = "State"))
# make an ordered factor (largest difference first)
avg_state_tuition_cost$state <- fct_reorder(factor(avg_state_tuition_cost$state),
                                            avg_state_tuition_cost$diff_out_in_state,
                                            .desc = TRUE)
# Axis label "State (rank)", reordered within each census region for the
# free-scale facets below.
avg_state_tuition_cost<-avg_state_tuition_cost%>%
  mutate(state_rank = str_c(state , ' ', '(', diff_rank, ')'),
         state_rank = reorder_within(state_rank,diff_out_in_state, Region))
#plot
#Sunburst sea color palette
#Background cloud: #D3D5D4
#class-one Aqua: #5F7880
#class-two Sun Kissed: #CCA085
#Text/other Blue Steel: #4B5556
# Title uses HTML spans rendered by ggtext::element_markdown below.
my_title <- "<b><span style = 'color:#5F7880;'>In-state</span></b> vs <b><span style = 'color:#CCA085;'>Out-of-state</span></b> Tuition in the US"
my_subtitle <- "The difference between average tuition per year in each state for both residents (in-state) and non residents (out-of-state).\nThe size of the difference is shown by the length of the line and the state's rank-from biggest to smallest-is indicated in\nbrackets. The states are grouped into four regions; Midwest, Northeast, South, and West."
theme_set(theme_light_modified(base_family = "IBM Plex Sans"))
# Dumbbell chart: a line per state connecting the in-state and
# out-of-state averages (in thousands of USD), faceted by census region.
ggplot(data = avg_state_tuition_cost,
       mapping = aes(y= state_rank)) +
  geom_line(aes(x=avg_annual_tutition/1000),
            size = 1,
            color= '#4B5556')+
  geom_point(aes(x=avg_annual_tutition/1000,
                 fill=tuition_category,
                 colour = tuition_category),
             size = 2.5,
             shape = 21)+
  xlab("Average Tuition per yr ('000 USD)") +
  ylab(NULL)+
  scale_x_continuous( breaks = seq(0, 13000/1000, 1000/1000))+
  scale_y_reordered()+
  labs(
    title= my_title,
    subtitle = my_subtitle,
    caption = "Tidytuesday week #11| Data: Tuitiontracker.org| @kinenealan"
  )+
  facet_wrap(~ Region, scales = "free_y",ncol=2)+
  theme(
    legend.position = "none",
    text = element_text(color = "#4B5556", size = 14),
    plot.background = element_rect(fill = "#D3D5D4"),
    panel.background = element_rect(fill = "#D3D5D4", linetype = 'blank'),
    plot.margin = margin(10, 40, 20, 20),
    axis.title.x = element_text(margin = margin(t = 10),face="bold"),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    axis.ticks = element_blank(),
    axis.text = element_text(color = '#4B5556', size = 12),
    panel.grid.major.x = element_line(color = "gray50", size = 0.2),
    panel.spacing.y = unit(1,'lines'),
    strip.background = element_rect(fill = "#D3D5D4"),
    strip.text = element_text(hjust = 0, color = "#4B5556", size = 14, face = "bold"),
    plot.title = element_markdown(hjust = 0, family = "IBM Plex Sans Bold", size = 21, face = "bold"),
    plot.subtitle = element_text(hjust = 0 , size = 13, family = "IBM Plex Sans Medium Italic"),
    plot.caption = element_text(color = "gray50", size = 10,margin = margin(t = 10))
  )+
  # NOTE(review): chaining ggsave() onto the plot with `+` relies on
  # last_plot() side effects; a separate ggsave() call after printing the
  # plot would be more robust.
  scale_fill_manual(values = c('#5F7880', '#CCA085'))+
  scale_color_manual(values = c('#5F7880', '#CCA085'))+
  ggsave(here::here("plots", "in_out_state_tuition.png"), dpi = 320, width = 14, height = 10, scale = 1)
| /TT-2020-w11/us_tuition.R | permissive | alakin4/TidyTuesday | R | false | false | 4,852 | r | # load packages
library(tidyverse)
library(ggplot2)
library(ggtext)
library(tidytext)
library(ggpmthemes)
library(patchwork)
library(here)
#Questions:
#Differences between in-state and out-of-state tuition by state
#in-state and out-of-state tuition:
# https://www.quora.com/What-are-in-state-and-out-of-state-tuition-fees
#which states have the largest differences
#Data
tuition_cost <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-03-10/tuition_cost.csv')
census_regions <- readr::read_csv('https://raw.githubusercontent.com/alakin4/Data/master/us_census_bureau_regions_and_divisions.csv')
#look at tuition per year of study since degrees have different lenghts
tuition_cost <- tuition_cost%>%
mutate(nr_degree_length = as.numeric(str_sub(degree_length,1,1)),
yr_in_state_total = in_state_total/nr_degree_length,
yr_out_of_state_total = out_of_state_total/nr_degree_length)%>%
filter(!is.na(yr_out_of_state_total)| !is.na(yr_in_state_total))
avg_state_tuition_cost<-tuition_cost%>%
filter(!is.na(state))%>%
group_by(state)%>%
summarise(avg_yr_in_state_total = mean(yr_in_state_total),
avg_yr_out_of_state_total = mean(yr_out_of_state_total))%>%
mutate(diff_out_in_state = avg_yr_out_of_state_total-avg_yr_in_state_total)%>%
arrange(desc(diff_out_in_state))%>%
mutate(diff_rank = row_number())%>%
pivot_longer(cols = -c(state, diff_out_in_state,diff_rank),
names_to = "tuition_category",
values_to = "avg_annual_tutition" )%>%
left_join(census_regions, by = c("state" = "State"))
# make an ordered factor
avg_state_tuition_cost$state <- fct_reorder(factor(avg_state_tuition_cost$state),
avg_state_tuition_cost$diff_out_in_state,
.desc = TRUE)
avg_state_tuition_cost<-avg_state_tuition_cost%>%
mutate(state_rank = str_c(state , ' ', '(', diff_rank, ')'),
state_rank = reorder_within(state_rank,diff_out_in_state, Region))
#plot
#Sunburst sea color palette
#Background cloud: #D3D5D4
#class-one Aqua: #5F7880
#class-two Sun Kissed: #CCA085
#Text/other Blue Steel: #4B5556
my_title <- "<b><span style = 'color:#5F7880;'>In-state</span></b> vs <b><span style = 'color:#CCA085;'>Out-of-state</span></b> Tuition in the US"
my_subtitle <- "The difference between average tuition per year in each state for both residents (in-state) and non residents (out-of-state).\nThe size of the difference is shown by the length of the line and the state's rank-from biggest to smallest-is indicated in\nbrackets. The states are grouped into four regions; Midwest, Northeast, South, and West."
theme_set(theme_light_modified(base_family = "IBM Plex Sans"))
ggplot(data = avg_state_tuition_cost,
mapping = aes(y= state_rank)) +
geom_line(aes(x=avg_annual_tutition/1000),
size = 1,
color= '#4B5556')+
geom_point(aes(x=avg_annual_tutition/1000,
fill=tuition_category,
colour = tuition_category),
size = 2.5,
shape = 21)+
xlab("Average Tuition per yr ('000 USD)") +
ylab(NULL)+
scale_x_continuous( breaks = seq(0, 13000/1000, 1000/1000))+
scale_y_reordered()+
labs(
title= my_title,
subtitle = my_subtitle,
caption = "Tidytuesday week #11| Data: Tuitiontracker.org| @kinenealan"
)+
facet_wrap(~ Region, scales = "free_y",ncol=2)+
theme(
legend.position = "none",
text = element_text(color = "#4B5556", size = 14),
plot.background = element_rect(fill = "#D3D5D4"),
panel.background = element_rect(fill = "#D3D5D4", linetype = 'blank'),
plot.margin = margin(10, 40, 20, 20),
axis.title.x = element_text(margin = margin(t = 10),face="bold"),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),
axis.text = element_text(color = '#4B5556', size = 12),
panel.grid.major.x = element_line(color = "gray50", size = 0.2),
panel.spacing.y = unit(1,'lines'),
strip.background = element_rect(fill = "#D3D5D4"),
strip.text = element_text(hjust = 0, color = "#4B5556", size = 14, face = "bold"),
plot.title = element_markdown(hjust = 0, family = "IBM Plex Sans Bold", size = 21, face = "bold"),
plot.subtitle = element_text(hjust = 0 , size = 13, family = "IBM Plex Sans Medium Italic"),
plot.caption = element_text(color = "gray50", size = 10,margin = margin(t = 10))
)+
scale_fill_manual(values = c('#5F7880', '#CCA085'))+
scale_color_manual(values = c('#5F7880', '#CCA085'))+
ggsave(here::here("plots", "in_out_state_tuition.png"), dpi = 320, width = 14, height = 10, scale = 1)
|
#find transfer temp
# prereq ------------------------------------------------------------------
require(rhdf5)
require(multidplyr)
require(tidyverse)
dir <- 'data/'
# functions ---------------------------------------------------------------
# Reduce one spin-correlation pair label to a compact "i_j" identifier:
# pull out every digit character and keep the 3rd and 4th, joined by "_".
# NOTE(review): keeping elements 3 and 4 assumes each label carries exactly
# four digit characters -- confirm against the label format in the h5 files.
my_prep_pairs <- function(pairs) {
  digits <- unlist(str_extract_all(pairs, '[0-9]', simplify = F))
  str_c(digits[c(3, 4)], sep = '_', collapse = '_')
}
# Read one ALPS-style .out.h5 result file into a tidy tibble of spin
# correlations: temp (simulation temperature T), x/y (site pair split from
# the label by my_prep_pairs), and the mean/error of the estimate.
# NOTE(review): this definition is shadowed by the directory-level
# `my_read` defined further below; rename one of them if both are needed.
my_read <- function(fol) {
  data1 <- h5read(fol,'simulation')
  data_param <- h5read(fol,'parameters')
  # one row per correlation pair; rowwise() so my_prep_pairs sees a single
  # label at a time before the "i_j" string is split into x and y
  df <- tibble(temp = data_param$T,
               pairs = data1$results$`Spin Correlations`$labels,
               mean = data1$results$`Spin Correlations`$mean$value,
               error = data1$results$`Spin Correlations`$mean$error) %>% 
    rowwise() %>% 
    mutate(pairs = my_prep_pairs(pairs)) %>% 
    ungroup() %>% 
    separate( 'pairs',c('x','y'))
}
# Extract the mean spin correlations at temperature `t` and reshape them
# into a matrix with one row per unique x site and one column per unique
# y site (column-major fill, in the original row order of `df`).
my_prep_plot <- function(df, t) {
  vals <- filter(df, temp == t)$mean
  dim(vals) <- c(NROW(unique(df$x)), NROW(unique(df$y)))
  return(vals)
}
# Read the /parameters group of an .out.h5 file into a two-column tibble
# (parameter, value); every value is coerced to character so parameters of
# mixed types can share a single column.
my_read_parameters <- function(fol){
  h5read(fol,'parameters') %>%
    enframe(name = 'parameter') %>%
    mutate(value = map(value,as.character)) %>%
    unnest(value)
}
# Read the /simulation/results group of an .out.h5 file into a nested
# tibble: one row per observable `type`, with `value` holding that
# observable's fields as a (parameter, value) tibble.
my_read_sim <- function(fol) {
  data1 <- h5read(fol,'simulation')$results %>%
    enframe(name = 'type') %>%
    mutate(value = map(value,enframe,name = 'parameter'))
  return(data1)
}
# Read every result file matching `pattern` under `dir` in parallel on a
# multidplyr cluster of `cl` workers. Files are assigned round-robin to
# workers via the `id` column; each worker runs my_read_sim and the nested
# results are unnested into one long tibble.
#   dir     -- directory to scan for result files
#   cl      -- number of worker processes
#   pattern -- file-name pattern passed to list.files()
my_read <- function(dir,cl = 3,pattern = '.out.h5'){
  cluster <- create_cluster(cl)
  # Fix: guarantee the worker cluster is shut down even when the pipeline
  # below errors (the original stopped it only on the success path).
  on.exit(parallel::stopCluster(cluster), add = TRUE)
  df_data <- tibble(dir = list.files(dir,pattern = pattern,full.names = T),
                    id = rep(1:cl,length.out = NROW(dir))) %>%
    partition(id,cluster = cluster) %>%
    cluster_library(c('rhdf5','tidyverse')) %>%
    cluster_assign_value('my_read_sim',my_read_sim) %>%
    mutate(results = map(dir,my_read_sim)) %>%
    collect() %>%
    ungroup() %>%
    select(-id) %>%
    unnest(results)
  return(df_data)
}
# read files --------------------------------------------------------------
# Per-file simulation parameters: lattice size L and temperature T, one row
# per .out.h5 file found under `dir`.
df_init_par <- tibble(dir = list.files(dir,pattern = '.out.h5',full.names = T)) %>%
  mutate(init_par = map(dir,my_read_parameters)) %>%
  unnest(init_par) %>%
  filter(parameter %in% c('L','T')) %>%
  spread(parameter,value)
# All observables from all files, read in parallel (second my_read above).
df_data <- my_read(dir)
# work with staggered magnetization ---------------------------------------
# Keep the `mean` field of the staggered-magnetization moments and spread
# each observable's (name, value) pairs into columns (value, error, ...).
df_data1 <- df_data %>%
  filter(type %in% c("Staggered Magnetization^2","Staggered Magnetization^4")) %>%
  unnest(value) %>%
  filter(parameter %in% c('mean')) %>%
  select(-c(parameter) ) %>%
  mutate(res = map(value,enframe)) %>%
  unnest(res) %>%
  unnest(value) %>%
  spread(name,value)
# Attach L and T per file, order by them, and make plotting factors.
df <- left_join(df_init_par,df_data1) %>%
  select(-dir) %>%
  arrange(as.numeric(L),as.numeric(`T`)) %>%
  mutate(L = as_factor(L),
         error_convergence = as.factor(error_convergence))
# quick look at the runs whose error estimate did not converge
df %>%
  filter(error_convergence == 1)
# Staggered magnetization per site (divide by L^2) against temperature,
# one panel per moment, with error bars and convergence flags.
df %>%
  # filter(type == 'Staggered Magnetization^2') %>%
  mutate(`T` = as.numeric(`T`)) %>%
  group_by(L,`T`) %>%
  mutate(value = value/as.numeric(L)^2) %>%
  # mutate(value = mean(value[type == 'Staggered Magnetization^4']/value[type == 'Staggered Magnetization^2']^2)) %>%
  ggplot(aes(`T`,value,col = L)) +
  geom_line() +
  geom_pointrange(aes(ymax = value + error,ymin = value - error)) +
  geom_point(aes(shape = error_convergence),size = 3) +
  facet_grid(type ~.,scales = 'free') +
  theme_bw()
# sanity check: observations available per (L, T) cell
ss <- df %>%
  # filter(type == 'Staggered Magnetization^2') %>%
  group_by(L,`T`) %>%
  summarise(n = n())
# work with energy --------------------------------------------------------
# Same extraction as above, restricted to the Energy observable.
df_data1 <- df_data %>%
  filter(type %in% c('Energy')) %>%
  unnest(value) %>%
  filter(parameter %in% c('mean')) %>%
  select(-c(parameter) ) %>%
  mutate(res = map(value,enframe)) %>%
  unnest(res) %>%
  unnest(value) %>%
  spread(name,value)
df <- left_join(df_init_par,df_data1) %>%
  select(-dir) %>%
  arrange(as.numeric(L),as.numeric(T)) %>%
  mutate(L = as_factor(L),
         error_convergence = as.factor(error_convergence))
# work with Magnetization^2 -----------------------------------------------
# Extract the 2nd and 4th magnetization moments for a Binder-style ratio.
df_data1 <- df_data %>%
  filter(type %in% c('Magnetization^2','Magnetization^4')) %>%
  unnest(value) %>%
  filter(parameter %in% c('mean')) %>%
  select(-c(parameter) ) %>%
  mutate(res = map(value,enframe)) %>%
  unnest(res) %>%
  unnest(value) %>%
  spread(name,value)
df <- left_join(df_init_par,df_data1) %>%
  select(-dir) %>%
  arrange(as.numeric(L),as.numeric(`T`)) %>%
  mutate(L = as_factor(L),
         error_convergence = as.factor(error_convergence))
df %>%
  filter(error_convergence == 1)
# Binder-cumulant-like curve vs temperature per lattice size; curves for
# different L should cross near the transition temperature.
df %>%
  # filter(type == 'Staggered Magnetization^2') %>%
  mutate(`T` = as.numeric(`T`)) %>%
  group_by(L,`T`) %>%
  mutate(value = (3/2 - (1 -1/3*mean(value[type == 'Magnetization^4'])/mean(value[type == 'Magnetization^2']^2)))) %>%
  ggplot(aes(`T`,value,col = L)) +
  geom_line() +
  # geom_pointrange(aes(ymax = value + error,ymin = value - error)) +
  geom_point(aes(shape = error_convergence),size = 3) +
  # facet_grid(type ~.,scales = 'free') +
  theme_bw()
| /read files.R | no_license | random-alex/temp | R | false | false | 5,160 | r | #find transfer temp
# prereq ------------------------------------------------------------------
require(rhdf5)
require(multidplyr)
require(tidyverse)
dir <- 'data/'
# functions ---------------------------------------------------------------
my_prep_pairs <- function(pairs) {
str_extract_all(pairs,'[0-9]',simplify = F) %>%
unlist() %>%
.[c(3,4)] %>%
str_c(sep = '_',collapse = '_')
}
my_read <- function(fol) {
data1 <- h5read(fol,'simulation')
data_param <- h5read(fol,'parameters')
df <- tibble(temp = data_param$T,
pairs = data1$results$`Spin Correlations`$labels,
mean = data1$results$`Spin Correlations`$mean$value,
error = data1$results$`Spin Correlations`$mean$error) %>%
rowwise() %>%
mutate(pairs = my_prep_pairs(pairs)) %>%
ungroup() %>%
separate( 'pairs',c('x','y'))
}
my_prep_plot <- function(df,t) {
temp1 <- df %>%
filter(temp == t) %>%
.$mean
dim(temp1) <- c(NROW(unique(df$x)),NROW(unique(df$y)))
return(temp1)
}
my_read_parameters <- function(fol){
h5read(fol,'parameters') %>%
enframe(name = 'parameter') %>%
mutate(value = map(value,as.character)) %>%
unnest(value)
}
my_read_sim <- function(fol) {
data1 <- h5read(fol,'simulation')$results %>%
enframe(name = 'type') %>%
mutate(value = map(value,enframe,name = 'parameter'))
return(data1)
}
my_read <- function(dir,cl = 3,pattern = '.out.h5'){
cluster <- create_cluster(cl)
df_data <- tibble(dir = list.files(dir,pattern = pattern,full.names = T),
id = rep(1:cl,length.out = NROW(dir))) %>%
partition(id,cluster = cluster) %>%
cluster_library(c('rhdf5','tidyverse')) %>%
cluster_assign_value('my_read_sim',my_read_sim) %>%
mutate(results = map(dir,my_read_sim)) %>%
collect() %>%
ungroup() %>%
select(-id) %>%
unnest(results)
parallel::stopCluster(cluster)
return(df_data)
}
# read files --------------------------------------------------------------
df_init_par <- tibble(dir = list.files(dir,pattern = '.out.h5',full.names = T)) %>%
mutate(init_par = map(dir,my_read_parameters)) %>%
unnest(init_par) %>%
filter(parameter %in% c('L','T')) %>%
spread(parameter,value)
df_data <- my_read(dir)
# work with staggered magnetization ---------------------------------------
df_data1 <- df_data %>%
filter(type %in% c("Staggered Magnetization^2","Staggered Magnetization^4")) %>%
unnest(value) %>%
filter(parameter %in% c('mean')) %>%
select(-c(parameter) ) %>%
mutate(res = map(value,enframe)) %>%
unnest(res) %>%
unnest(value) %>%
spread(name,value)
df <- left_join(df_init_par,df_data1) %>%
select(-dir) %>%
arrange(as.numeric(L),as.numeric(`T`)) %>%
mutate(L = as_factor(L),
error_convergence = as.factor(error_convergence))
df %>%
filter(error_convergence == 1)
df %>%
# filter(type == 'Staggered Magnetization^2') %>%
mutate(`T` = as.numeric(`T`)) %>%
group_by(L,`T`) %>%
mutate(value = value/as.numeric(L)^2) %>%
# mutate(value = mean(value[type == 'Staggered Magnetization^4']/value[type == 'Staggered Magnetization^2']^2)) %>%
ggplot(aes(`T`,value,col = L)) +
geom_line() +
geom_pointrange(aes(ymax = value + error,ymin = value - error)) +
geom_point(aes(shape = error_convergence),size = 3) +
facet_grid(type ~.,scales = 'free') +
theme_bw()
ss <- df %>%
# filter(type == 'Staggered Magnetization^2') %>%
group_by(L,`T`) %>%
summarise(n = n())
# work with energy --------------------------------------------------------
df_data1 <- df_data %>%
filter(type %in% c('Energy')) %>%
unnest(value) %>%
filter(parameter %in% c('mean')) %>%
select(-c(parameter) ) %>%
mutate(res = map(value,enframe)) %>%
unnest(res) %>%
unnest(value) %>%
spread(name,value)
df <- left_join(df_init_par,df_data1) %>%
select(-dir) %>%
arrange(as.numeric(L),as.numeric(T)) %>%
mutate(L = as_factor(L),
error_convergence = as.factor(error_convergence))
# work with Magnetization^2 -----------------------------------------------
df_data1 <- df_data %>%
filter(type %in% c('Magnetization^2','Magnetization^4')) %>%
unnest(value) %>%
filter(parameter %in% c('mean')) %>%
select(-c(parameter) ) %>%
mutate(res = map(value,enframe)) %>%
unnest(res) %>%
unnest(value) %>%
spread(name,value)
df <- left_join(df_init_par,df_data1) %>%
select(-dir) %>%
arrange(as.numeric(L),as.numeric(`T`)) %>%
mutate(L = as_factor(L),
error_convergence = as.factor(error_convergence))
df %>%
filter(error_convergence == 1)
df %>%
# filter(type == 'Staggered Magnetization^2') %>%
mutate(`T` = as.numeric(`T`)) %>%
group_by(L,`T`) %>%
mutate(value = (3/2 - (1 -1/3*mean(value[type == 'Magnetization^4'])/mean(value[type == 'Magnetization^2']^2)))) %>%
ggplot(aes(`T`,value,col = L)) +
geom_line() +
# geom_pointrange(aes(ymax = value + error,ymin = value - error)) +
geom_point(aes(shape = error_convergence),size = 3) +
# facet_grid(type ~.,scales = 'free') +
theme_bw()
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
# Print a greeting from package1 and invisibly return the greeting string
# (the value returned by print()).
hello <- function() {
  greeting <- "Hello, world! From Package1"
  print(greeting)
}
| /package1/R/hello.R | permissive | robbydecosemaeker/multiple-packages | R | false | false | 464 | r | # Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
# Print a greeting from package1 and invisibly return the greeting string
# (the value returned by print()).
hello <- function() {
  greeting <- "Hello, world! From Package1"
  print(greeting)
}
|
#single is a data frame from context for a single stock data
#strategy setup
# Globals used by calc/sell/buy below (besides `single`, `share`, `hold`
# and `moneyleft`, which the driving context must provide):
#   moneytotal -- account equity used for position sizing (1% risk per N)
#   close / closeStr -- lookback length and the derived moving-average
#                       column name ("AvgClose350")
moneytotal = 200000
close = 350
closeStr = paste("AvgClose",close,sep="")
# helper metrics (calTrueFluc, calAvgTFFluc, calAvgClose) and utilities
# (printAllStock) are defined in these sourced files
source("r/metrics.R")
source("r/util.R")
# Update the derived indicator columns for bar `i`, given `count` bars seen
# so far: true fluctuation (N), its running average, and the `close`-bar
# average close. All three helpers come from r/metrics.R.
# NOTE(review): the helpers presumably mutate `single` in place via <<- --
# confirm in r/metrics.R.
calc = function(i,count){
  calTrueFluc(i)
  #20 low
  calAvgTFFluc(i,count)
  calAvgClose(i,count,close)
}
# Exit rule: once the long moving average exists (count >= 351) and we hold
# shares, liquidate ALL units when Close drops below AvgClose - 3*N.
# Mutates the globals `moneyleft`, `moneytotal`, `share`, `hold` and the
# bookkeeping columns of `single`; returns TRUE if a sale happened.
# NOTE(review): profit is computed against each unit's entry-bar Close,
# not the actual fill price stored in `start` -- confirm this is intended.
sell = function(i, count){
  if(count >= 351 && share > 0 && single[i,"Close"] < single[i,closeStr] - 3* single[i,"N"]){
    profit = 0
    # unwind every held unit at this bar's Close (100 shares per position lot)
    while(share > 0){
      moneyleft <<- moneyleft + single[i,"Close"] * single[hold[share],"position"]*100
      profit = profit + (single[i,"Close"] - single[hold[share],"Close"]) * 100 * single[hold[share],"position"]
      share <<- share - 1
    }
    #single stock is ok sell will sell all
    moneytotal <<- moneyleft
    single[i,"moneyleft"] <<- moneyleft
    single[i,"act"] <<- "sell"
    single[i,"profit"] <<- profit
    print("[sell] at trend")
    print(single[i,])
    print("holding shares:")
    printAllStock(hold,single)
    hold <<- c()
    return(TRUE)
  }
  return(FALSE)
}
# Entry rule: once the long moving average exists (count >= 351), buy one
# unit when Close breaks above AvgClose + 7*N, subject to: at most 4 units
# held, the order must be fillable at Close*1.015, and any add-on must be
# at least N/2 above the last unit's entry. Position size risks ~1% of
# moneytotal per N. Mutates `single`, `share`, `hold`, `moneyleft`.
# NOTE(review): the fill check reads single[i+1,"Open"], i.e. the NEXT
# bar's open -- look-ahead in a backtest; confirm this is intended.
buy = function(i,count){
  if(count >= 351 && single[i,"Close"] > single[i,closeStr] + 7 * single[i,"N"]){
    # cap at 4 concurrently held units
    if(share >= 4){
      print("try to buy but fail:")
      single[i,"act"] <<- "meet"
      print(single[i,])
      return(FALSE)
    }
    # order priced at Close*1.015 must not gap below the next open
    if(single[i,"Close"]*1.015 < single[i+1,"Open"]){
      print("fail to make the deal")
      single[i,"act"] <<- "fail"
      print(single[i,])
      print(single[i+1,])
      return(FALSE)
    }
    # add-on units require the trend to have advanced by at least N/2
    if(share > 0 && single[i,"Close"] < single[hold[share],"start"] + single[hold[share],"N"]/2){
      print("trend not obvious no buy")
      single[i,"act"] <<- "notrend"
      print(single[i,])
      return(FALSE)
    }
    #in case cannot buy in
    # record fill price, stop level (entry - N) and 1%-risk position size
    single[i,"start"] <<- single[i,"Close"]*1.015
    single[i,"stop"] <<- single[i,]$Close - single[i,"N"]
    single[i,"position"] <<- as.integer( moneytotal * 0.01 / (single[i,"N"]*100))
    # if cash cannot cover the sized order, go all-in with what is left
    if(single[i,]$position*single[i,]$start*100 > moneyleft){
      print("not enough money left to buy targe , all in")
      single[i,]$position <<- as.integer(moneyleft/(single[i,]$start*100))
      if(single[i,]$position == 0){
        print("cannot afford a single share")
        single[i,"act"]<<-"empty"
        print(single[i,])
        return(FALSE)
      }
    }
    moneyleft <<- moneyleft - single[i,]$position * single[i,]$start * 100
    #deal!
    single[i,"moneyleft"] <<- moneyleft
    share <<- share+1
    single[i,"share"] <<- share
    hold <<- c(hold,i)
    single[i,"act"] <<- "buy"
    print("[buy]hoding shares:")
    printAllStock(hold,single)
    #cannot sell on same day
  }
}
| /strategy/atrFunc.R | no_license | syncShan/ana | R | false | false | 2,658 | r | #single is a data frame from context for a single stock data
#strategy setup
moneytotal = 200000
close = 350
closeStr = paste("AvgClose",close,sep="")
source("r/metrics.R")
source("r/util.R")
calc = function(i,count){
calTrueFluc(i)
#20 low
calAvgTFFluc(i,count)
calAvgClose(i,count,close)
}
sell = function(i, count){
if(count >= 351 && share > 0 && single[i,"Close"] < single[i,closeStr] - 3* single[i,"N"]){
profit = 0
while(share > 0){
moneyleft <<- moneyleft + single[i,"Close"] * single[hold[share],"position"]*100
profit = profit + (single[i,"Close"] - single[hold[share],"Close"]) * 100 * single[hold[share],"position"]
share <<- share - 1
}
#single stock is ok sell will sell all
moneytotal <<- moneyleft
single[i,"moneyleft"] <<- moneyleft
single[i,"act"] <<- "sell"
single[i,"profit"] <<- profit
print("[sell] at trend")
print(single[i,])
print("holding shares:")
printAllStock(hold,single)
hold <<- c()
return(TRUE)
}
return(FALSE)
}
# Entry rule: buy when the close breaks 7*N above the rolling average close
# (column named by global `closeStr`), after the warm-up period.
# Guards, in order: at most 4 concurrent lots; the simulated fill price
# (close * 1.015 slippage) must reach the NEXT bar's open (NOTE(review):
# reads single[i+1,], so i must not be the last row -- confirm callers
# guarantee this); and adding to a position requires the close to exceed the
# last lot's entry price by N/2 (pyramiding step).
# Side effects via <<-: updates globals moneyleft, share, hold and the
# start/stop/position/act columns of the global data frame `single`.
# Returns FALSE on any rejected entry; no explicit return on success.
buy = function(i,count){
if(count >= 351 && single[i,"Close"] > single[i,closeStr] + 7 * single[i,"N"]){
# Cap pyramiding at 4 concurrent lots.
if(share >= 4){
print("try to buy but fail:")
single[i,"act"] <<- "meet"
print(single[i,])
return(FALSE)
}
# Simulated fill: an order at close*1.015 misses if the next open gaps above.
if(single[i,"Close"]*1.015 < single[i+1,"Open"]){
print("fail to make the deal")
single[i,"act"] <<- "fail"
print(single[i,])
print(single[i+1,])
return(FALSE)
}
# Require the price to have advanced at least N/2 past the last entry.
if(share > 0 && single[i,"Close"] < single[hold[share],"start"] + single[hold[share],"N"]/2){
print("trend not obvious no buy")
single[i,"act"] <<- "notrend"
print(single[i,])
return(FALSE)
}
#in case cannot buy in
# Record entry price (with slippage), a stop one N below the close, and size
# the lot so that one N of adverse move risks ~1% of total capital.
single[i,"start"] <<- single[i,"Close"]*1.015
single[i,"stop"] <<- single[i,]$Close - single[i,"N"]
single[i,"position"] <<- as.integer( moneytotal * 0.01 / (single[i,"N"]*100))
# Not enough cash for the computed size: fall back to whatever cash allows.
if(single[i,]$position*single[i,]$start*100 > moneyleft){
print("not enough money left to buy targe , all in")
single[i,]$position <<- as.integer(moneyleft/(single[i,]$start*100))
if(single[i,]$position == 0){
print("cannot afford a single share")
single[i,"act"]<<-"empty"
print(single[i,])
return(FALSE)
}
}
moneyleft <<- moneyleft - single[i,]$position * single[i,]$start * 100
#deal!
single[i,"moneyleft"] <<- moneyleft
share <<- share+1
single[i,"share"] <<- share
hold <<- c(hold,i)
single[i,"act"] <<- "buy"
print("[buy]hoding shares:")
printAllStock(hold,single)
#cannot sell on same day
}
}
|
# Directory of the file currently being source()'d
# (technique from http://stackoverflow.com/a/16046056).
script.dir <- function() {
  source.path <- sys.frame(1)$ofile
  dirname(source.path)
}
### Start with project dir, and helper functions
projectDir <- normalizePath(file.path(script.dir(), ".."))

# Path helpers: resolve locations relative to the project root, the user's
# Dropbox data folder, and the shared plot folder.
getFullPath <- function(subpath) {
  file.path(projectDir, subpath)
}
homeFolder <- path.expand("~")
dataFolder <- file.path(homeFolder, "Dropbox", "enhancer_predictions")
plotFolder <- file.path(dataFolder, "AdamPlots")
getFullPlotPath <- function(subpath) {
  file.path(plotFolder, subpath)
}
getDropboxPath <- function(subpath) {
  file.path(dataFolder, subpath)
}

# Write a data frame as an unquoted, tab-separated table without row names.
exportAsTable <- function(df, file) {
  write.table(df, file = file, quote = FALSE, row.names = FALSE, sep = "\t")
}

# Remove every object from the global environment except `clear` itself and
# any names listed in save.vec.
clear <- function(save.vec = c()) {
  all.names <- ls(globalenv())
  doomed <- setdiff(all.names, c(save.vec, "clear"))
  rm(list = doomed, pos = globalenv())
}

# Read a whitespace-delimited table with a header, keeping strings as character.
readInTable <- function(file) {
  read.table(file = file, stringsAsFactors = FALSE, header = TRUE)
}
# setup libs
# install if needed from http://stackoverflow.com/a/4090208
list.of.packages <- c(
  "ggplot2",
  "ROCR", # http://cran.r-project.org/web/packages/ROCR/index.html
  "glmnet", # http://cran.r-project.org/web/packages/glmnet/glmnet.pdf
  "randomForest", #http://cran.at.r-project.org/web/packages/randomForest/randomForest.pdf
  "doParallel",
  "foreach",
  "mboost",
  "vcd", # mosaicpl
  "C50", # kuhn:411
  "mda", # fda, kuhn:362
  "gam",
  "reshape2", # needed for melt
  "MASS",
  "devtools" # for github installs
)
# Install any listed package that is not present yet.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# gbm 2.0-8 only exists in the CRAN archive, so fetch it by URL.
# Fixes: use a namespaced devtools::install_url() call -- devtools is
# installed above but not attached until the library() loop below, so the
# bare install_url() was not yet in scope -- and restrict the presence check
# to the Package column instead of the whole installed.packages() matrix.
if (!"gbm" %in% installed.packages()[,"Package"]) {
  devtools::install_url("http://cran.r-project.org/src/contrib/Archive/gbm/gbm_2.0-8.tar.gz")
}
#install_github("harrysouthworth/gbm")
#load libs
lapply(list.of.packages, function(lib){
  library(lib, character.only=TRUE)
})
# Decide how many parallel workers to register from the machine's core count:
# half the cores on big (>8 core) machines, all-but-one otherwise, and
# exactly one when only a single core exists. Prints the choice as a side
# effect and returns it.
calcNumCores <- function(){
  available <- detectCores()
  if (available == 1) {
    chosen <- 1
  } else if (available > 8) {
    chosen <- available / 2
  } else {
    chosen <- available - 1
  }
  cat("using", chosen, "cores")
  chosen
}
#registerDoParallel(calcNumCores())
# Register a fixed pool of 10 foreach/%dopar% workers (the dynamic
# core-count variant above is kept for reference).
registerDoParallel(10)
## load in other libs
# Project-local sources: data loading, prediction helpers, model runners,
# and plotting. Paths resolve via script.dir(), so this file must be
# source()'d rather than run from the command line.
source(getFullPath("analysis/dataInput.R"))
source(getFullPath("analysis/predLib.R"))
source(getFullPath("analysis/runPredOnData.R"))
source(getFullPath("analysis/plotResults.R"))
## main analysis
main.mouse.forebrain.test <- function(){
  # Train a GBM on the mouse forebrain training set, score the held-out test
  # set, and write raw scores plus exploratory plots.
  data.dir <- makeDir(getFullPath("data/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("mouse_test_forebrain_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/mouse/forebrain/"))
  train.df <- cleanMouseForebrain()
  test.df <- cleanMouseForebrain(mouse.file=mouse.test.forebrain)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getForebrainCols(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getForebrainCols(),
                             outdir=plots.dir, msg="Forebrain Data -> explore")
}
main.mouse.heart.test <- function(){
  # Same train/test GBM pipeline as the forebrain version, for mouse heart.
  data.dir <- makeDir(getFullPath("data//"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("mouse_test_heart_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/mouse/heart/"))
  train.df <- cleanMouseHeart()
  test.df <- cleanMouseHeart(mouse.file=mouse.test.heart)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getHeartCols(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getHeartCols(),
                             outdir=plots.dir, msg="Heart Data -> test on genome")
}
main.human.heart.test <- function(){
  # Train on the human heart data and score the human heart test set.
  data.dir <- makeDir(getFullPath("data/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("human_test_heart_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/human/heart/"))
  train.df <- cleanHumanHeart()
  test.df <- cleanHumanHeart(human.file=human.test.heart)
  # Align the test set's columns with the training set.
  # NOTE(review): the single-column rename below is immediately overwritten
  # by the full colnames assignment -- presumably intentional; confirm.
  test.df$label <- NULL
  colnames(test.df)[5] <- "label"
  colnames(test.df) <- colnames(train.df)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getHeartColsHuman(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getHeartColsHuman(),
                             outdir=plots.dir, msg="Heart Data -> test on genome")
}
main.heart <- function(){
  # Mouse heart: export the cleaned data, benchmark all ML algorithms over
  # 30 train/test splits, and produce exploratory + comparison plots.
  data.dir <- makeDir(getFullPath("data/heart/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/heart/"))
  heart.df <- cleanMouseHeart()
  exportAsTable(df=heart.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=heart.df, cols=getHeartCols(),
                        trials=30, resultFile=results.file)
  # exploratory analysis of the heart data
  exploritoryPlots(df=heart.df, cols=getHeartCols(), outdir=plots.dir,
                   msg="Heart Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Heart data -> AW")
}
main.brain <- function(){
  # Mouse brain: export the cleaned data, run the ML algorithm comparison,
  # and produce exploratory + comparison plots.
  brain.data.dir <- makeDir(getFullPath("data/brain/"))
  brain.mldata <- paste0(brain.data.dir, "dataForPred.tab")
  brain.mlresults <- paste0(brain.data.dir, "mlResults.tab")
  brain.plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/brain/"))
  brain.df <- cleanMouseBrain()
  exportAsTable(df=brain.df, file=brain.mldata)
  # exploratory analysis of the brain data
  exploritoryPlots(df=brain.df, cols=getBrainCols(), outdir=brain.plots.dir, msg="Brain Data -> explore")
  # run algorithms "trials" number of times -> save result
  # (fix: the original called accumMlAlgos twice back to back with identical
  # arguments, silently doubling 30 expensive trials and discarding the
  # first result; run it once)
  brain.ml.df <- accumMlAlgos(df=brain.df, cols=getBrainCols(),
                              trials=30, resultFile=brain.mlresults)
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=brain.ml.df, outdir = brain.plots.dir, msg="Brain data -> AW")
}
main.forebrain <- function(){
  # Mouse forebrain: export the cleaned data, run the ML comparison over 30
  # train/test splits, and produce exploratory + comparison plots.
  data.dir <- makeDir(getFullPath("data/forebrain/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/forebrain/"))
  forebrain.df <- cleanMouseForebrain()
  exportAsTable(df=forebrain.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=forebrain.df, cols=getForebrainCols(),
                        trials=30, resultFile=results.file)
  # exploratory analysis of the forebrain data
  exploritoryPlots(df=forebrain.df, cols=getForebrainCols(), outdir=plots.dir,
                   msg="Forebrain Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Forebrain data -> AW")
}
main.humanBrain <- function(){
  # Human brain: export the cleaned data, run the ML comparison, and plot.
  # NOTE(review): the msg labels below say "Forebrain" although this is the
  # human brain data set (copy-paste from main.forebrain) -- kept as-is to
  # preserve output.
  data.dir <- makeDir(getFullPath("data/human/brain/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/human/brain/"))
  brain.df <- cleanHumanBrain()
  exportAsTable(df=brain.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=brain.df, cols=getBrainColsHuman(),
                        trials=30, resultFile=results.file)
  # exploratory analysis
  exploritoryPlots(df=brain.df, cols=getBrainColsHuman(), outdir=plots.dir,
                   msg="Forebrain Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Forebrain Human data -> AW")
}
main.humanHeart <- function(){
  # Human heart: export the cleaned data, run the ML comparison, and plot.
  heart.human.data.dir <- makeDir(getFullPath("data/human/heart/"))
  heart.human.mldata <- paste0(heart.human.data.dir, "dataForPred.tab")
  heart.human.mlresults <- paste0(heart.human.data.dir, "mlResults.tab")
  heart.human.plots.dir <- makeDir(getFullPlotPath("algoCompare/human/heart/"))
  heart.human.df <- cleanHumanHeart()
  exportAsTable(df=heart.human.df, file=heart.human.mldata)
  # run algorithms "trials" number of times -> save result
  human.heart.ml.df <- accumMlAlgos(df=heart.human.df, cols=getHeartColsHuman(),
                                    trials=30, resultFile=heart.human.mlresults)
  # exploratory analysis of the heart data
  # (fix: the original messages said "Foreheart", a nonsense copy-paste
  # artifact from main.forebrain; corrected to "Heart")
  exploritoryPlots(df=heart.human.df, cols=getHeartColsHuman(),
                   outdir=heart.human.plots.dir, msg="Heart Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=human.heart.ml.df, outdir = heart.human.plots.dir,
                msg="Heart Human data -> AW")
}
#main.humanHeart();main.humanBrain()
# For each mouse tissue, fit/evaluate GBM on the full data set, repeat with
# only the top-5 features, and compare the top-5 run against all algorithms.
modelGBM <- function(){
  df.list <- list("forebrain"=cleanMouseForebrain(),
                  "brain"=cleanMouseBrain(),
                  "heart"=cleanMouseHeart())
  dir.list <- list("forebrain"=makeDir(getFullPlotPath("gbm/mouse/forebrain/")),
                   "brain"=makeDir(getFullPlotPath("gbm/mouse/brain/")),
                   "heart"=makeDir(getFullPlotPath("gbm/mouse/heart/")))
  cols.list <- list("forebrain"=getForebrainCols(),
                    "brain"=getBrainCols(),
                    "heart"=getHeartCols())
  for(tissue in c("heart", "forebrain", "brain")){
    df <- df.list[[tissue]]
    cols <- cols.list[[tissue]]
    outdir <- dir.list[[tissue]]
    runGbmOnDataSet(df=df, cols=cols, outdir=outdir)
    runGbmTopFive(df=df, cols=cols, outdir=outdir)
    results.df <- accumMlAlgosGbmTop5(df=df, cols=cols, trials=30,
                                      resultFile=paste0(outdir, "/gbmTop5compare.tab"),
                                      seed=412)
    plotMlresults(df=results.df, outdir = outdir,
                  msg=paste("Tissue =", tissue,"\ngbm top 5 vs. normal compare"))
  }
}
# For each human tissue, fit/evaluate GBM on the full data set, repeat with
# only the top-5 features, and compare the top-5 run against all algorithms.
modelGBM.human <- function(){
  df.list <- list("brain"=cleanHumanBrain(),
                  "heart"=cleanHumanHeart())
  dir.list <- list("brain"=makeDir(getFullPlotPath("gbm/human/brain/")),
                   "heart"=makeDir(getFullPlotPath("gbm/human/heart/")))
  cols.list <- list("brain"=getBrainColsHuman(),
                    "heart"=getHeartColsHuman())
  for(tissue in c("heart","brain")){
    runGbmOnDataSet(df=df.list[[tissue]], cols=cols.list[[tissue]], outdir=dir.list[[tissue]])
    runGbmTopFive(df=df.list[[tissue]], cols=cols.list[[tissue]], outdir=dir.list[[tissue]])
    gbmResults.df <- accumMlAlgosGbmTop5(df=df.list[[tissue]], cols=cols.list[[tissue]],
                                         trials=30,
                                         resultFile=paste0(dir.list[[tissue]], "/gbmTop5compare.tab"),
                                         seed=412)
    # fix: the original plot label said "top 4", inconsistent with the
    # top-5 pipeline (runGbmTopFive / accumMlAlgosGbmTop5 / modelGBM)
    plotMlresults(df=gbmResults.df, outdir = dir.list[[tissue]],
                  msg=paste("Tissue =", tissue,"\ngbm top 5 vs. normal compare"))
  }
}
# Driver: run the full GBM modelling pipeline for human, then for mouse.
main.modelGbmHumanMouse <- function(){
cat("******************************** modelling gbm in human....\n")
modelGBM.human()
cat("done")
cat("******************************** modeling gbm in mouse...\n")
modelGBM()
cat("done")
}
# Driver: mouse heart + forebrain algorithm comparison, then whole-set GBM.
main <- function(){
cat("******************************** running heart...\n")
main.heart()
cat("******************************** running forebrain...\n")
main.forebrain()
cat("******************************** assessing GBM model on entire dataset...\n")
modelGBM()
cat("******************************** done...\n")
}
# Driver for the human data sets.
# NOTE(review): the heart step is commented out, and modelGBM() runs the
# MOUSE pipeline here -- modelGBM.human() may have been intended; confirm.
runHuman <- function(){
cat("******************************** beating human heart...\n")
#main.humanHeart();
cat("******************************** thinking brain...\n")
main.humanBrain()
cat("******************************** assessing GBM model on entire dataset...\n")
modelGBM()
cat("******************************** done...\n")
}
# Driver: score the genome-wide test sets with the trained GBM models.
runPredOnGenomeData <- function(){
cat("mouse heart")
main.mouse.heart.test()
cat("mouse forebrain")
main.mouse.forebrain.test()
cat("human heart")
main.human.heart.test()
}
| /mlAlgoAW/analysis/main.R | no_license | stjordanis/enhancer_pred | R | false | false | 13,268 | r | script.dir <- function() {
# from http://stackoverflow.com/a/16046056
dirname(sys.frame(1)$ofile)
}
### Start with project dir, and helper functions
projectDir <- normalizePath(file.path(script.dir(), ".."))
getFullPath <- function(subpath){ file.path(projectDir, subpath) }
homeFolder <- path.expand("~")
dataFolder <- file.path(homeFolder, "Dropbox", "enhancer_predictions")
plotFolder <- file.path(dataFolder, "AdamPlots")
getFullPlotPath <- function(subpath){ file.path(plotFolder, subpath) }
getDropboxPath <- function(subpath){ file.path(dataFolder, subpath) }
exportAsTable <- function(df, file){ write.table(df,file=file,quote=FALSE, row.names=FALSE,sep="\t") }
clear <- function(save.vec=c()){ ls.vec <- ls(globalenv());del.vec <-setdiff(ls.vec,c(save.vec,"clear")); rm(list=del.vec,pos=globalenv())}
readInTable <- function(file) read.table(file=file,stringsAsFactors=FALSE,header=TRUE)
# setup libs
# install if needed from http://stackoverflow.com/a/4090208
list.of.packages <- c(
"ggplot2",
"ROCR", # http://cran.r-project.org/web/packages/ROCR/index.html
"glmnet", # http://cran.r-project.org/web/packages/glmnet/glmnet.pdf
"randomForest", #http://cran.at.r-project.org/web/packages/randomForest/randomForest.pdf
"doParallel",
"foreach",
"mboost",
"vcd", # mosaicpl
"C50", # kuhn:411
"mda", # fda, kuhn:362
"gam",
"reshape2", # needed for melt
"MASS",
"devtools" # for github installs
)
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
if (!"gbm" %in% installed.packages()) {
install_url("http://cran.r-project.org/src/contrib/Archive/gbm/gbm_2.0-8.tar.gz")
}
#install_github("harrysouthworth/gbm")
#load libs
lapply(list.of.packages, function(lib){
library(lib, character.only=TRUE)
})
# Decide how many parallel workers to register from the machine's core count:
# half the cores on big (>8 core) machines, all-but-one otherwise, and
# exactly one when only a single core exists. Prints the choice as a side
# effect and returns it.
calcNumCores <- function(){
  available <- detectCores()
  if (available == 1) {
    chosen <- 1
  } else if (available > 8) {
    chosen <- available / 2
  } else {
    chosen <- available - 1
  }
  cat("using", chosen, "cores")
  chosen
}
#registerDoParallel(calcNumCores())
# Register a fixed pool of 10 foreach/%dopar% workers (the dynamic
# core-count variant above is kept for reference).
registerDoParallel(10)
## load in other libs
# Project-local sources: data loading, prediction helpers, model runners,
# and plotting. Paths resolve via script.dir(), so this file must be
# source()'d rather than run from the command line.
source(getFullPath("analysis/dataInput.R"))
source(getFullPath("analysis/predLib.R"))
source(getFullPath("analysis/runPredOnData.R"))
source(getFullPath("analysis/plotResults.R"))
## main analysis
main.mouse.forebrain.test <- function(){
  # Train a GBM on the mouse forebrain training set, score the held-out test
  # set, and write raw scores plus exploratory plots.
  data.dir <- makeDir(getFullPath("data/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("mouse_test_forebrain_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/mouse/forebrain/"))
  train.df <- cleanMouseForebrain()
  test.df <- cleanMouseForebrain(mouse.file=mouse.test.forebrain)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getForebrainCols(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getForebrainCols(),
                             outdir=plots.dir, msg="Forebrain Data -> explore")
}
main.mouse.heart.test <- function(){
  # Same train/test GBM pipeline as the forebrain version, for mouse heart.
  data.dir <- makeDir(getFullPath("data//"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("mouse_test_heart_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/mouse/heart/"))
  train.df <- cleanMouseHeart()
  test.df <- cleanMouseHeart(mouse.file=mouse.test.heart)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getHeartCols(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getHeartCols(),
                             outdir=plots.dir, msg="Heart Data -> test on genome")
}
main.human.heart.test <- function(){
  # Train on the human heart data and score the human heart test set.
  data.dir <- makeDir(getFullPath("data/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  out.file <- getDropboxPath("human_test_heart_master_table_raw_scores_results.tab")
  plots.dir <- makeDir(getFullPlotPath("gbmGenomicPrediction/human/heart/"))
  train.df <- cleanHumanHeart()
  test.df <- cleanHumanHeart(human.file=human.test.heart)
  # Align the test set's columns with the training set.
  # NOTE(review): the single-column rename below is immediately overwritten
  # by the full colnames assignment -- presumably intentional; confirm.
  test.df$label <- NULL
  colnames(test.df)[5] <- "label"
  colnames(test.df) <- colnames(train.df)
  #exportAsTable(df=train.df, file=mldata.file)
  results.df <- runGbmOnTestSet(df.train=train.df, df.test=test.df,
                                cols=getHeartColsHuman(),
                                outfile=out.file, outdir=plots.dir)
  exploritoryPlotsGenomeTest(df=results.df, cols=getHeartColsHuman(),
                             outdir=plots.dir, msg="Heart Data -> test on genome")
}
main.heart <- function(){
  # Mouse heart: export the cleaned data, benchmark all ML algorithms over
  # 30 train/test splits, and produce exploratory + comparison plots.
  data.dir <- makeDir(getFullPath("data/heart/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/heart/"))
  heart.df <- cleanMouseHeart()
  exportAsTable(df=heart.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=heart.df, cols=getHeartCols(),
                        trials=30, resultFile=results.file)
  # exploratory analysis of the heart data
  exploritoryPlots(df=heart.df, cols=getHeartCols(), outdir=plots.dir,
                   msg="Heart Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Heart data -> AW")
}
main.brain <- function(){
  # Mouse brain: export the cleaned data, run the ML algorithm comparison,
  # and produce exploratory + comparison plots.
  brain.data.dir <- makeDir(getFullPath("data/brain/"))
  brain.mldata <- paste0(brain.data.dir, "dataForPred.tab")
  brain.mlresults <- paste0(brain.data.dir, "mlResults.tab")
  brain.plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/brain/"))
  brain.df <- cleanMouseBrain()
  exportAsTable(df=brain.df, file=brain.mldata)
  # exploratory analysis of the brain data
  exploritoryPlots(df=brain.df, cols=getBrainCols(), outdir=brain.plots.dir, msg="Brain Data -> explore")
  # run algorithms "trials" number of times -> save result
  # (fix: the original called accumMlAlgos twice back to back with identical
  # arguments, silently doubling 30 expensive trials and discarding the
  # first result; run it once)
  brain.ml.df <- accumMlAlgos(df=brain.df, cols=getBrainCols(),
                              trials=30, resultFile=brain.mlresults)
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=brain.ml.df, outdir = brain.plots.dir, msg="Brain data -> AW")
}
main.forebrain <- function(){
  # Mouse forebrain: export the cleaned data, run the ML comparison over 30
  # train/test splits, and produce exploratory + comparison plots.
  data.dir <- makeDir(getFullPath("data/forebrain/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/mouse/forebrain/"))
  forebrain.df <- cleanMouseForebrain()
  exportAsTable(df=forebrain.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=forebrain.df, cols=getForebrainCols(),
                        trials=30, resultFile=results.file)
  # exploratory analysis of the forebrain data
  exploritoryPlots(df=forebrain.df, cols=getForebrainCols(), outdir=plots.dir,
                   msg="Forebrain Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Forebrain data -> AW")
}
main.humanBrain <- function(){
  # Human brain: export the cleaned data, run the ML comparison, and plot.
  # NOTE(review): the msg labels below say "Forebrain" although this is the
  # human brain data set (copy-paste from main.forebrain) -- kept as-is to
  # preserve output.
  data.dir <- makeDir(getFullPath("data/human/brain/"))
  mldata.file <- paste0(data.dir, "dataForPred.tab")
  results.file <- paste0(data.dir, "mlResults.tab")
  plots.dir <- makeDir(getFullPlotPath("algoCompare/human/brain/"))
  brain.df <- cleanHumanBrain()
  exportAsTable(df=brain.df, file=mldata.file)
  # run algorithms "trials" number of times -> save result
  ml.df <- accumMlAlgos(df=brain.df, cols=getBrainColsHuman(),
                        trials=30, resultFile=results.file)
  # exploratory analysis
  exploritoryPlots(df=brain.df, cols=getBrainColsHuman(), outdir=plots.dir,
                   msg="Forebrain Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=ml.df, outdir = plots.dir, msg="Forebrain Human data -> AW")
}
main.humanHeart <- function(){
  # Human heart: export the cleaned data, run the ML comparison, and plot.
  heart.human.data.dir <- makeDir(getFullPath("data/human/heart/"))
  heart.human.mldata <- paste0(heart.human.data.dir, "dataForPred.tab")
  heart.human.mlresults <- paste0(heart.human.data.dir, "mlResults.tab")
  heart.human.plots.dir <- makeDir(getFullPlotPath("algoCompare/human/heart/"))
  heart.human.df <- cleanHumanHeart()
  exportAsTable(df=heart.human.df, file=heart.human.mldata)
  # run algorithms "trials" number of times -> save result
  human.heart.ml.df <- accumMlAlgos(df=heart.human.df, cols=getHeartColsHuman(),
                                    trials=30, resultFile=heart.human.mlresults)
  # exploratory analysis of the heart data
  # (fix: the original messages said "Foreheart", a nonsense copy-paste
  # artifact from main.forebrain; corrected to "Heart")
  exploritoryPlots(df=heart.human.df, cols=getHeartColsHuman(),
                   outdir=heart.human.plots.dir, msg="Heart Data -> explore")
  # plot the results of each ml algo on the test/training divisions
  plotMlresults(df=human.heart.ml.df, outdir = heart.human.plots.dir,
                msg="Heart Human data -> AW")
}
#main.humanHeart();main.humanBrain()
# For each mouse tissue, fit/evaluate GBM on the full data set, repeat with
# only the top-5 features, and compare the top-5 run against all algorithms.
modelGBM <- function(){
  df.list <- list("forebrain"=cleanMouseForebrain(),
                  "brain"=cleanMouseBrain(),
                  "heart"=cleanMouseHeart())
  dir.list <- list("forebrain"=makeDir(getFullPlotPath("gbm/mouse/forebrain/")),
                   "brain"=makeDir(getFullPlotPath("gbm/mouse/brain/")),
                   "heart"=makeDir(getFullPlotPath("gbm/mouse/heart/")))
  cols.list <- list("forebrain"=getForebrainCols(),
                    "brain"=getBrainCols(),
                    "heart"=getHeartCols())
  for(tissue in c("heart", "forebrain", "brain")){
    df <- df.list[[tissue]]
    cols <- cols.list[[tissue]]
    outdir <- dir.list[[tissue]]
    runGbmOnDataSet(df=df, cols=cols, outdir=outdir)
    runGbmTopFive(df=df, cols=cols, outdir=outdir)
    results.df <- accumMlAlgosGbmTop5(df=df, cols=cols, trials=30,
                                      resultFile=paste0(outdir, "/gbmTop5compare.tab"),
                                      seed=412)
    plotMlresults(df=results.df, outdir = outdir,
                  msg=paste("Tissue =", tissue,"\ngbm top 5 vs. normal compare"))
  }
}
# For each human tissue, fit/evaluate GBM on the full data set, repeat with
# only the top-5 features, and compare the top-5 run against all algorithms.
modelGBM.human <- function(){
  df.list <- list("brain"=cleanHumanBrain(),
                  "heart"=cleanHumanHeart())
  dir.list <- list("brain"=makeDir(getFullPlotPath("gbm/human/brain/")),
                   "heart"=makeDir(getFullPlotPath("gbm/human/heart/")))
  cols.list <- list("brain"=getBrainColsHuman(),
                    "heart"=getHeartColsHuman())
  for(tissue in c("heart","brain")){
    runGbmOnDataSet(df=df.list[[tissue]], cols=cols.list[[tissue]], outdir=dir.list[[tissue]])
    runGbmTopFive(df=df.list[[tissue]], cols=cols.list[[tissue]], outdir=dir.list[[tissue]])
    gbmResults.df <- accumMlAlgosGbmTop5(df=df.list[[tissue]], cols=cols.list[[tissue]],
                                         trials=30,
                                         resultFile=paste0(dir.list[[tissue]], "/gbmTop5compare.tab"),
                                         seed=412)
    # fix: the original plot label said "top 4", inconsistent with the
    # top-5 pipeline (runGbmTopFive / accumMlAlgosGbmTop5 / modelGBM)
    plotMlresults(df=gbmResults.df, outdir = dir.list[[tissue]],
                  msg=paste("Tissue =", tissue,"\ngbm top 5 vs. normal compare"))
  }
}
# Driver: run the full GBM modelling pipeline for human, then for mouse.
main.modelGbmHumanMouse <- function(){
cat("******************************** modelling gbm in human....\n")
modelGBM.human()
cat("done")
cat("******************************** modeling gbm in mouse...\n")
modelGBM()
cat("done")
}
# Driver: mouse heart + forebrain algorithm comparison, then whole-set GBM.
main <- function(){
cat("******************************** running heart...\n")
main.heart()
cat("******************************** running forebrain...\n")
main.forebrain()
cat("******************************** assessing GBM model on entire dataset...\n")
modelGBM()
cat("******************************** done...\n")
}
# Driver for the human data sets.
# NOTE(review): the heart step is commented out, and modelGBM() runs the
# MOUSE pipeline here -- modelGBM.human() may have been intended; confirm.
runHuman <- function(){
cat("******************************** beating human heart...\n")
#main.humanHeart();
cat("******************************** thinking brain...\n")
main.humanBrain()
cat("******************************** assessing GBM model on entire dataset...\n")
modelGBM()
cat("******************************** done...\n")
}
# Driver: score the genome-wide test sets with the trained GBM models.
runPredOnGenomeData <- function(){
cat("mouse heart")
main.mouse.heart.test()
cat("mouse forebrain")
main.mouse.forebrain.test()
cat("human heart")
main.human.heart.test()
}
|
\name{MS_removeDrugs}
\alias{MS_removeDrugs}
\title{Remove edges containing drug nodes}
\description{
This function allows removing edges containing drug ("dr:") nodes.
}
\usage{
MS_removeDrugs(network_table)
}
\arguments{
\item{network_table}{three-column matrix where each row represents an edge
between two nodes. See function "MS_keggNetwork( )".
}
}
\value{
A three-column matrix corresponding to the input network-table without the drug
nodes.
}
\examples{
data(MetaboSignal_table)
# Remove drug nodes if present
drugsRemoved <- MS_removeDrugs(MetaboSignal_table)
}
| /man/MS_removeDrugs.Rd | no_license | AndreaRMICL/MetaboSignal | R | false | false | 581 | rd | \name{MS_removeDrugs}
\alias{MS_removeDrugs}
\title{Remove edges containing drug nodes}
\description{
This function allows removing edges containing drug ("dr:") nodes.
}
\usage{
MS_removeDrugs(network_table)
}
\arguments{
\item{network_table}{three-column matrix where each row represents and edge
between two nodes. See function "MS_keggNetwork( )".
}
}
\value{
A three-column matrix corresponding to the input network-table without the drug
nodes.
}
\examples{
data(MetaboSignal_table)
# Remove drug nodes if present
drugsRemoved <- MS_removeDrugs(MetaboSignal_table)
}
|
# Vectors 1
# we're going to do some random draws, so set the
# random seed so we get the same results each time (starts random sequence in same place)
set.seed(1234)
# Vector with 5 random (normal) values
# (rnorm defaults to mean = 0, sd = 1)
x1 <- rnorm(5)
# typing a bare object name auto-prints its value
x1
# Which values in x1 are less than 0?
# Compare x1 to 0
# Create another vector named x2 with 5 random (normal) values in it
# (copy and modify the code in line 9 above)
# drawn from distribution with mean 0.5;
# look in the distributions section on the reference card
# https://cran.r-project.org/doc/contrib/Baggott-refcard-v2.pdf
# or the help for the rnorm function
# to see how to specify the mean (it's another argument to rnorm())
# Compare the maximum values of each vector;
# Is the max of x1 greater than x2?
# look in the Math section of the reference card for the function name
# to get the maximum
# Compare the mean values of each vector;
# Is the mean of x1 greater than x2?
# look in the Math section of the reference card for function name
# Add the values 2, 3, and 4 to the end of x1, look at the result
# we're going to do some random draws, so set the
# random seed so we get the same results each time (starts random sequence in same place)
set.seed(1234)
# Vector with 5 random (normal) values
x1 <- rnorm(5)
x1
# Which values in x1 are less than 0?
# Compare x1 to 0
# Create another vector named x2 with 5 random (normal) values in it
# (copy and modify the code in line 9 above)
# drawn from distribution with mean 0.5;
# look in the distributions section on the reference card
# https://cran.r-project.org/doc/contrib/Baggott-refcard-v2.pdf
# or the help for the rnorm function
# to see how to specify the mean (it's another argument to rnorm())
# Compare the maximum values of each vector;
# Is the max of x1 greater than x2?
# look in the Math section of the reference card for the function name
# to get the maximum
# Compare the mean values of each vector;
# Is the mean of x1 greater than x2?
# look in the Math section of the reference card for function name
# Add the values 2, 3, and 4 to the end of x1, look at the result
|
# FUNCTION 8
#' Plot the input trajectory according to the types of flight
#' @param x x-position from data
#' @param y y-position from data
#' @param z z-position from data
#' @param FlightPerformance requires flighttype information from \code{FlightPerformance}
#' @return 3D scatterplot of flight types from the input trajectory
#' @import scatterplot3d
#' @examples
#' utils::data(trajectory)
#' trajectory = trajectory
#' bird = BirdMorphParam(BMass = 0.7710, WSpan = 0.98, WArea = 0.119, C_db = 0.1)
#' FSComponents=FlightSpeedComponents(t=trajectory[,4],
#'                x=trajectory[,1], y=trajectory[,2], z=trajectory[,3])
#' Tairspeed = TrueAirSpeed1(FSComponents)
#' FPerformance = FlightPerformance(bird,FSComponents, Tairspeed,
#'                 C_l = 0.5, C_t = 0.1)
#'
#' PlotFlightType(x=trajectory[,1], y=trajectory[,2], z=trajectory[,3], FPerformance)
#' @export
PlotFlightType = function(x, y, z, FlightPerformance){
  # One colour per flight-type factor level (1 -> blue, 2 -> red, 3 -> green).
  type.colours <- c("blue", "red", "green")[as.numeric(FlightPerformance$flighttype)]
  # Trim the raw trajectory to the samples that were classified.
  nx <- length(FlightPerformance$x)
  ny <- length(FlightPerformance$y)
  nz <- length(FlightPerformance$z)
  scatterplot3d(x=x[1:nx], y=y[1:ny], z=z[1:nz],
                color=type.colours, type = 'o', cex.symbols = 0.05,
                xlab = 'Latitude (m)', ylab = 'Longitude (m)', zlab = 'Height (m)',
                cex.lab = 1.1, cex.axis = 0.9,
                main = 'Different flight types', cex.main=1.2)
  legend("right", legend = c("Descent","Climb","Steady"),
         col = c("blue", "red","green"), pch = 16, inset = -0.06,
         xpd = TRUE, horiz = FALSE, cex=0.8)
}
| /R/PlotFlightTypes.R | no_license | Josephine-Tetteh/FlightSim-updates | R | false | false | 1,587 | r | # FUNCTION 8
#' Plot the input trajectory according to the types of flight
#' @param x x-position from data
#' @param y y-position from data
#' @param z z-position from data
#' @param FlightPerformance requires flighttype information from \code{FlightPerformance}
#' @return 3D scatterplot of flight types from the input trajectory
#' @import scatterplot3d
#' @examples
#' utils::data(trajectory)
#' trajectory = trajectory
#' bird = BirdMorphParam(BMass = 0.7710, WSpan = 0.98, WArea = 0.119, C_db = 0.1)
#' FSComponents=FlightSpeedComponents(t=trajectory[,4],
#' x=trajectory[,1], y=trajectory[,2], z=trajectory[,3])
#' Tairspeed = TrueAirSpeed1(FSComponents)
#' FPerformance = FlightPerformance(bird,FSComponents, Tairspeed,
#' C_l = 0.5, C_t = 0.1)
#'
#' PlotFlightType(x=trajectory[,1], y=trajectory[,2], z=trajectory[,3], FPerformance)
#' @export
PlotFlightType = function(x,y,z, FlightPerformance){
pol = c("blue", "red", "green")
pol <- pol[as.numeric(FlightPerformance$flighttype)]
scatterplot3d(x=x[1:length(FlightPerformance$x)], y=y[1:length(FlightPerformance$y)],
z=z[1:length(FlightPerformance$z)], color=pol,type = 'o',cex.symbols = 0.05,
xlab = 'Latitude (m)', ylab = 'Longitude (m)', zlab = 'Height (m)',cex.lab = 1.1, cex.axis = 0.9,
main = 'Different flight types',cex.main=1.2)
legend("right", legend = c("Descent","Climb","Steady"),
col = c("blue", "red","green"), pch = 16, inset = -0.06, xpd = TRUE, horiz = FALSE,cex=0.8 )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conversion-utilities.R
\name{QG.alpha}
\alias{QG.alpha}
\title{Calculates the alpha parameter from a QG model}
\usage{
QG.alpha(pars)
}
\arguments{
\item{pars}{A bayou formatted parameter list with parameters h2 (heritability), P (phenotypic variance) and w2 (width of adaptive landscape)}
}
\value{
An alpha value according to the equation \code{alpha = h2*P/(P+w2+P)}.
}
\description{
Calculates the alpha parameter from a QG model
}
| /man/QG.alpha.Rd | no_license | uyedaj/bayou | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conversion-utilities.R
\name{QG.alpha}
\alias{QG.alpha}
\title{Calculates the alpha parameter from a QG model}
\usage{
QG.alpha(pars)
}
\arguments{
\item{pars}{A bayou formatted parameter list with parameters h2 (heritability), P (phenotypic variance) and w2 (width of adaptive landscape)}
}
\value{
An alpha value according to the equation \code{alpha = h2*P/(P+w2+P)}.
}
\description{
Calculates the alpha parameter from a QG model
}
|
## File Name: gom_em_extract_lambda_matrix.R
## File Version: 0.01

# Arrange the logit-scale lambda parameters as an I x K matrix of
# probabilities via the inverse-logit (logistic) transform.
gom_em_extract_lambda_matrix <- function(lambda_logit, I, K)
{
    matrix( stats::plogis(lambda_logit), nrow=I, ncol=K )
}
| /R/gom_em_extract_lambda_matrix.R | no_license | cran/sirt | R | false | false | 227 | r | ## File Name: gom_em_extract_lambda_matrix.R
## File Name: gom_em_extract_lambda_matrix.R
## File Version: 0.01

# Arrange the logit-scale lambda parameters as an I x K matrix of
# probabilities via the inverse-logit (logistic) transform.
gom_em_extract_lambda_matrix <- function(lambda_logit, I, K)
{
    matrix( stats::plogis(lambda_logit), nrow=I, ncol=K )
}
# Exploratory Data Analysis - Assignment 2 - Q.5
#setting Working Directory
# NOTE(review): hard-coded absolute path; the script only runs on this machine.
setwd("C:/Users/Makarand/Desktop/Coursera/project")
# Load ggplot2 library (library() errors loudly if missing, unlike require())
library(ggplot2)
# Loading provided datasets from local machine
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
NEI$year <- factor(NEI$year, levels=c('1999', '2002', '2005', '2008'))
# Baltimore City, Maryland == fips
MD.onroad <- subset(NEI, fips == 24510 & type == 'ON-ROAD')
# Aggregate total emissions per year
MD.df <- aggregate(MD.onroad[, 'Emissions'], by=list(MD.onroad$year), sum)
colnames(MD.df) <- c('year', 'Emissions')
# How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
# Generate the graph in the same directory as the source code
png('plot5.png')
ggplot(data=MD.df, aes(x=year, y=Emissions)) + geom_bar(stat = "identity") +
  guides(fill=FALSE) +
  ggtitle('Total Emissions of Motor Vehicle Sources in Baltimore City, Maryland') +
  ylab(expression('PM'[2.5])) + xlab('Year') + theme(legend.position='none') +
  # fix: size/hjust/vjust are fixed aesthetics and belong outside aes();
  # inside aes() they were treated as data mappings (original bug)
  geom_text(aes(label=round(Emissions, 0)), size=1, hjust=0.5, vjust=2)
dev.off()
| /plot5.R | no_license | makrand12/Exploratory-Data-Analysis | R | false | false | 1,152 | r | # Exploratory Data Analysis - Assignment 2 - Q.5
#setting Working Directory
setwd("C:/Users/Makarand/Desktop/Coursera/project")
# Load ggplot2 library
require(ggplot2)
# Loading provided datasets from local machine
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
NEI$year <- factor(NEI$year, levels=c('1999', '2002', '2005', '2008'))
# Baltimore City, Maryland == fips
MD.onroad <- subset(NEI, fips == 24510 & type == 'ON-ROAD')
# Aggregate
MD.df <- aggregate(MD.onroad[, 'Emissions'], by=list(MD.onroad$year), sum)
colnames(MD.df) <- c('year', 'Emissions')
# How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
# Generate the graph in the same directory as the source code
png('plot5.png')
ggplot(data=MD.df, aes(x=year, y=Emissions)) + geom_bar(stat = "identity") + guides(fill=F) +
ggtitle('Total Emissions of Motor Vehicle Sources in Baltimore City, Maryland') +
ylab(expression('PM'[2.5])) + xlab('Year') + theme(legend.position='none') +
geom_text(aes(label=round(Emissions,0), size=1, hjust=0.5, vjust=2))
dev.off()
|
#' Print method for objects of class 'bayesOmic'
#'
#' Displays the model intercepts (alpha) and the coefficients of the shared
#' components (beta) stored in \code{x$res.summary}, then reminds the user
#' that \code{getSpecific()} and \code{getShared()} return the full
#' specific/shared components.
#'
#' @param x object of class 'bayesOmic'
#' @param ... further arguments passed to or from other methods (ignored)
#'
#' @S3method print bayesOmic
print.bayesOmic <- function(x, ...)
{
alpha<-x$res.summary$alpha
beta<-x$res.summary$beta
cat("\n Intercepts (alpha): \n")
print(alpha)
cat("\n Coefficients of shared components (beta): \n")
print(beta)
cat("\n Use 'getSpecific()' and 'getShared()' functions to get specific or shared components, respectively. \n")
}
| /R/print.bayesOmic.R | no_license | isglobal-brge/bayesOmic | R | false | false | 464 | r | #' print.bayesOmicAssoc
#'
#' @param x object of class 'bayesOmic'
#' @param x
#' @param ...
#'
#' @S3method print bayesOmic
print.bayesOmic <- function(x, ...)
{
alpha<-x$res.summary$alpha
beta<-x$res.summary$beta
cat("\n Intercepts (alpha): \n")
print(alpha)
cat("\n Coefficients of shared components (beta): \n")
print(beta)
cat("\n Use 'getSpecific()' and 'getShared()' functions to get specific or shared components, respectively. \n")
}
|
# Exploratory visualization of 2016 primary-election county data.
library(tidyverse)
# Cleaned county-level dataset; columns used below include pop14,
# white_nohisp, partywin, age18, age5, income_house (schema assumed from
# usage -- TODO confirm against GaofengCleaning.csv).
Clean_data <- read_csv('./data/GaofengCleaning.csv')
# Population-weighted share of non-Hispanic whites per state (plot object
# stored, not printed here).
all_state_wnh <- Clean_data %>%
group_by(state_abbreviation) %>%
summarise(white_nohisp = sum(white_nohisp*pop14)/sum(pop14)) %>%
ggplot(aes(x=state_abbreviation, y = white_nohisp)) +
geom_histogram(stat = "identity")
# East-coast state abbreviations (defined but not used below).
East_Coast <- c('FL',
'GA',
'SC',
'NC',
'VA',
'MD',
'DE',
'NJ',
'NY',
'CT',
'RI',
'MA',
'NH',
'ME')
# Binned under-18 share, split urban/rural by whether county population
# exceeds the mean (plot object stored, not printed here).
Age18 <- Clean_data %>%
mutate(iage18 = cut(age18,breaks = c(0,5,10,15,20,25,30,35,40,45)), Coast = ifelse(pop14 > mean(pop14),'urban', 'rural')) %>%
ggplot(aes(x=iage18,fill=as.factor(Coast))) + geom_bar(position = 'dodge')
# Private non-farm establishments per person vs. party win.
Clean_data %>%
filter(Private_nonfarm_establishments_per_person >= 0.04) %>%
mutate(no_farm= cut(Private_nonfarm_establishments_per_person, breaks=c(0,0.02,0.04,0.06,0.08,10))) %>%
ggplot(aes(x=no_farm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('Percent non_farm_establishments_per_person') + ylab('Number of county win')
# Percentage of white residents vs. party win.
Clean_data %>%
mutate(White = cut(white, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=White,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of white people') + ylab('Number of county win')
# Percentage of African-American residents vs. party win.
Clean_data %>%
mutate(afAm = cut(AfAm, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=afAm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of AfAm people') + ylab('Number of county win')
# Share of population under 18 vs. party win.
Clean_data %>%
mutate(age18 = cut(age18, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age18,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# Share of population under 5 vs. party win (x-axis label reused from the
# previous plot).
Clean_data %>%
mutate(age5 = cut(age5, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age5,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# Binned household income vs. party win.
Clean_data %>%
mutate(income_house = cut(income_house, breaks=c(0,30000,60000,90000,120000,200000))) %>%
ggplot(aes(x=income_house, fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('income per house') + ylab('Number of county win')
# Sanity check: weighted white_nohisp share for Alabama only.
AL <- Clean_data %>%
filter(state_abbreviation == 'AL')
sum(AL$white_nohisp*AL$pop14)/sum(AL$pop14)
# Clean
| /data visiualize.R | no_license | UCSDWayneTang/2016-primary-election | R | false | false | 2,589 | r | library(tidyverse)
Clean_data <- read_csv('./data/GaofengCleaning.csv')
all_state_wnh <- Clean_data %>%
group_by(state_abbreviation) %>%
summarise(white_nohisp = sum(white_nohisp*pop14)/sum(pop14)) %>%
ggplot(aes(x=state_abbreviation, y = white_nohisp)) +
geom_histogram(stat = "identity")
East_Coast <- c('FL',
'GA',
'SC',
'NC',
'VA',
'MD',
'DE',
'NJ',
'NY',
'CT',
'RI',
'MA',
'NH',
'ME')
Age18 <- Clean_data %>%
mutate(iage18 = cut(age18,breaks = c(0,5,10,15,20,25,30,35,40,45)), Coast = ifelse(pop14 > mean(pop14),'urban', 'rural')) %>%
ggplot(aes(x=iage18,fill=as.factor(Coast))) + geom_bar(position = 'dodge')
# No_farm -- for democract
Clean_data %>%
filter(Private_nonfarm_establishments_per_person >= 0.04) %>%
mutate(no_farm= cut(Private_nonfarm_establishments_per_person, breaks=c(0,0.02,0.04,0.06,0.08,10))) %>%
ggplot(aes(x=no_farm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('Percent non_farm_establishments_per_person') + ylab('Number of county win')
# White
Clean_data %>%
mutate(White = cut(white, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=White,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of white people') + ylab('Number of county win')
Clean_data %>%
mutate(afAm = cut(AfAm, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=afAm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of AfAm people') + ylab('Number of county win')
# age 18
Clean_data %>%
mutate(age18 = cut(age18, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age18,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# age 5
Clean_data %>%
mutate(age5 = cut(age5, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age5,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# income house
Clean_data %>%
mutate(income_house = cut(income_house, breaks=c(0,30000,60000,90000,120000,200000))) %>%
ggplot(aes(x=income_house, fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('income per house') + ylab('Number of county win')
# Check
AL <- Clean_data %>%
filter(state_abbreviation == 'AL')
sum(AL$white_nohisp*AL$pop14)/sum(AL$pop14)
# Clean
|
######################################################################
##                                                                  ##
##  This is the Cleaning and Getting Data Course Project R script   ##
##                                                                  ##
##  Author: Jose M. Pi                                              ##
##  Date: 11/10/2017                                                ##
##  Version 1.0                                                     ##
##                                                                  ##
######################################################################
##############################
##  SETUP WORKING DIRECTORY ##
##############################
setwd("~/Documents/Data Science/Getting and Cleaning Data/Week 4/Project")
## Create the subdirectory "data" if not already found.
if(!file.exists("./data")){dir.create("./data")}
######################
##   LOAD LIBRARY   ##
######################
##plyr library
library(plyr)
##############################
##  FILE RETRIEVAL SECTION  ##
##############################
## Get the zip file for the project.
## Assign the file link/ location to fileUrl.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
## Download the file
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
## Unzip the file contents into the data subdirectory.
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Set files path to reach all subdirectories.
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
##############################
##   DATA LOADING SECTION   ##
##############################
## ACTIVITY DATA ##
## Load the data files for Activity info from the Y_test and Y_train.txt files in the respective
## Activity data frames (ActivityTest and ActivityTrain).
## These data frames will contain the activity codes from the accelerometer files - here is the activity code list:
## 1. Walking
## 2. Walking-upstairs
## 3. Walking_downstatirs
## 4. Siting
## 5. Standing
## 6. Laying.
ActivityTest  <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
## ActivityTest data frame contains 2,947 rows and 1 column.
ActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
## ActivityTrain data frame contains 7,352 rows and 1 column.
## SUBJECT DATA ##
## These data frames will contain the subject/ user number from the accelerometer files.
## Load the data files for Subject info from the subject_test and subject_train.txt files in the respective
## Subject data frames (SubjectTest and SubjectTrain).
SubjectTest  <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
## SubjectTest data frame contains 2,947 rows and 1 column.
SubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
## SubjectTrain data frame contains 7,352 rows and 1 column.
## OBSERVATIONS/ MEASUREMENTS ##
## These data frames will contain the observations from the accelerometer files.
## Load the data files for Features info from the x_test and x_train.txt files in the respective
## Feature data frames (FeaturesTest and FeaturesTrain).
FeaturesTest  <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
## FeaturesTest data frame contains 2,947 rows and 561 columns.
FeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
## FeaturesTrain data frame contains 7,352 rows and 561 columns.
##########################
##  DATA MERGE SECTION  ##
##########################
## Prepare the data frames with descriptive coumn names, then merge the data.
## Bind the Activity data frame sets into the Activity data frame.
## The Activity data frame contrains all Activity data from ActivtyTrain and ActivityTest data frames.
Activity <- rbind(ActivityTrain, ActivityTest)
## The Activity data frame contrains 10,299 rows and 1 column.
## Bind the Subject data frame sets into the Subject data frame.
## The Subject data frame contrains all Subject data from SubjectTrain and SubjectTest data frames.
Subject <- rbind(SubjectTrain, SubjectTest)
## The Subject data frame contrains 10,299 rows and 1 column.
## Bind the Features data frame sets into the Festures data frame.
## The Features data frame contrains all the observations data from FeaturesTrain and FeaturesTest data frames.
Features <- rbind(FeaturesTrain, FeaturesTest)
## The Features data frame contrains 10,299 rows and 561 columns.
## SET COLUMN NAMES
## Set column name for Subject data frame
names(Subject)<-c("subject")
## Set column name for Activity data frame
names(Activity)<- c("activity")
## Set column name for Features data frame from features.txt
## (use the full argument name 'header' - 'head' relied on partial matching)
FeaturesNames <- read.table(file.path(path_rf, "features.txt"),header=FALSE)
## The FeaturesNames data frame contrains 561 rows and 2 columns.
## Name the columns in the Features data frame based on the FeatureNames 2nd column, which contains the column names.
names(Features)<- FeaturesNames$V2
## MERGE ALL DATA
## Bind the Subject and Activity data frames into MergeSubjectActivity data frame.
MergeSubjectActivity <- cbind(Subject, Activity)
## This bind yields the MergeSubjectActivity data frame with 10,299 rows and 2 columns (subject and activity columns).
## Bind the Features and MergeSubjectActivity data frames into MergeInfo data frame.
MergeInfo <- cbind(Features, MergeSubjectActivity)
## This bind yields the MergeInfo data frame with 10,299 rows and 563 columns (adds the Subject and Activity columns to the MergInfo data set).
##########################################
##  EXTRACT OBSERVATIONS/ MEASUREMENTS  ##
##########################################
## Create a factor list of only the column names that match "mean()" and std()" from MergeInfo.
subsetFeaturesNames <- FeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", FeaturesNames$V2)]
## Add "subject" and "activity" column names to the subsetNames factor list.
subsetNames <-c(as.character(subsetFeaturesNames), "subject", "activity" )
## Subset the data into MergeInfo based on the Select statement column names in subsetNames
## which contain columns with the std() and mean() observations, with activity and subject data.
MergeInfo <-subset(MergeInfo,select=subsetNames)
## MergeInfo subset is created yielding 10,299 rows and 68 columns (selected columns from subsetNames).
################################################################
##  GIVE DESCRIPTIVE ACTIVITY AND COLUMN NAMES TO DATA SET    ##
################################################################
## Load the activity names from activity_lables.txt file.
activityNames <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
## Factorize the activity column in MergeInfo - converts the activity code number to an activity description.
MergeInfo$activity<- factor(MergeInfo$activity,labels=as.character(activityNames$V2))
## Give descriptive lables to the MergeInfo data frame.
names(MergeInfo)<-gsub("^t", "time", names(MergeInfo))
names(MergeInfo)<-gsub("^f", "frequency", names(MergeInfo))
names(MergeInfo)<-gsub("Acc", "Accelerometer", names(MergeInfo))
names(MergeInfo)<-gsub("Gyro", "Gyroscope", names(MergeInfo))
names(MergeInfo)<-gsub("Mag", "Magnitude", names(MergeInfo))
names(MergeInfo)<-gsub("BodyBody", "Body", names(MergeInfo))
################################################
## Write MergeInfo to the MergeInfo.csv file  ##
################################################
# Write the MergeInfo to a CSV file.
# (use the full argument name 'row.names' - 'row.name' relied on partial matching)
write.table(MergeInfo, file = "MergeInfo.csv",row.names=FALSE, sep = ",")
######################################
##    CREATE SECOND TIDY DATA SET   ##
######################################
## Average each accelerometer variable for each activity and each subject/ user.
MergeInfo2 <-aggregate(. ~subject + activity, MergeInfo, mean)
## Creates a tidy set of 180 rows and 68 columns
## Order in ascending fashion the data data set by the subject column first and activity column second.
MergeInfo2 <-MergeInfo2[order(MergeInfo2$subject,MergeInfo2$activity),]
## Write MergeInfo2 to a text file.
write.table(MergeInfo2, file = "MergeInfoTidyData.txt",row.names=FALSE)
| /run_analysis.R | no_license | josempi/Getting-and-Cleaning-Data-Course-Project | R | false | false | 8,295 | r | ######################################################################
## ##
## This is the Cleaning and Getting Data Course Project R script ##
## ##
## Author: Jose M. Pi ##
## Date: 11/10/2017 ##
## Version 1.0 ##
## ##
######################################################################
##############################
## SETUP WORKING DIRECTORY ##
##############################
setwd("~/Documents/Data Science/Getting and Cleaning Data/Week 4/Project")
## Create the subdirectory "data" if not already found.
if(!file.exists("./data")){dir.create("./data")}
######################
## LOAD LIBRARY ##
######################
##plyr library
library(plyr)
##############################
## FILE RETRIEVAL SECTION ##
##############################
## Get the zip file for the project.
## Assign the file link/ location to fileUrl.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
## Download the file
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
## Unzip the file contents into the data subdirectory.
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Set files path to reach all subdirectories.
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
##############################
## DATA LOADING SECTION ##
##############################
## ACTIVITY DATA ##
## Load the data files for Activity info from the Y_test and Y_train.txt files in the respective
## Activity data frames (ActivityTest and ActivityTrain).
## These data frames will contain the activity codes from the accelerometer files - here is the activity code list:
## 1. Walking
## 2. Walking-upstairs
## 3. Walking_downstatirs
## 4. Siting
## 5. Standing
## 6. Laying.
ActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
## ActivityTest data frame contains 2,957 rows and 1 column.
ActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
## ActivityTrain data frame contains 7,352 rows and 1 column.
## SUBJECT DATA ##
## These data frames will contain the subject/ user number from the accelerometer files.
## Load the data files for Subject info from the subject_test and subject_train.txt files in the respective
## Subject data frames (SubjectTest and SubjectTrain).
SubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
## SubjectTest data frame contains 2,957 rows and 1 column.
SubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
## SubjectTrain data frame contains 7,352 rows and 1 column.
## OBSERVATIONS/ MEASUREMENTS ##
## These data frames will contain the observations from the accelerometer files.
## Load the data files for Features info from the x_test and x_train.txt files in the respective
## Feature data frames (FeaturesTest and FeaturesTrain).
FeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
## FeaturesTest data frame contains 2,957 rows and 561 columns.
FeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
## FeaturesTrain data frame contains 7,352 rows and 561 columns.
##########################
## DATA MERGE SECTION ##
##########################
## Prepare the data frames with descriptive coumn names, then merge the data.
## Bind the Activity data frame sets into the Activity data frame.
## The Activity data frame contrains all Activity data from ActivtyTrain and ActivityTest data frames.
Activity <- rbind(ActivityTrain, ActivityTest)
## The Activity data frame contrains 10,299 rows and 1 column.
## Bind the Subject data frame sets into the Subject data frame.
## The Subject data frame contrains all Subject data from SubjectTrain and SubjectTest data frames.
Subject <- rbind(SubjectTrain, SubjectTest)
## The Subject data frame contrains 10,299 rows and 1 column.
## Bind the Features data frame sets into the Festures data frame.
## The Features data frame contrains all the observations data from FeaturesTrain and FeaturesTest data frames.
Features <- rbind(FeaturesTrain, FeaturesTest)
## The Features data frame contrains 10,299 rows and 561 columns.
## SET COLUMN NAMES
## Set column name for Subject data frame
names(Subject)<-c("subject")
## Set column name for Activity data frame
names(Activity)<- c("activity")
## Set column name for Features data frame from features.txt
FeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
## The FeaturesNames data frame contrains 561 rows and 2 columns.
## Name the columns in the Features data frame based on the FeatureNames 2nd column, which contains the column names.
names(Features)<- FeaturesNames$V2
## MERGE ALL DATA
## Bind the Subject and Activity data frames into MergeSubjectActivity data frame.
MergeSubjectActivity <- cbind(Subject, Activity)
## This bind yields the MergeSubjectActivity data frame with 10,299 rows and 2 columns (subject and activity columns).
## Bind the Features and MergeSubjectActivity data frames into MergeInfo data frame.
MergeInfo <- cbind(Features, MergeSubjectActivity)
## This bind yields the MergeInfo data frame with 10,299 rows and 563 columns (adds the Subject and Activity columns to the MergInfo data set).
##########################################
## EXTRACT OBSERVATIONS/ MEASUREMENTS ##
##########################################
## Create a factor list of only the column names that match "mean()" and std()" from MergeInfo.
subsetFeaturesNames <- FeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", FeaturesNames$V2)]
## Add "subject" and "activity" column names to the subsetNames factor list.
subsetNames <-c(as.character(subsetFeaturesNames), "subject", "activity" )
## Subset the data into MergeInfo based on the Select statement column names in subsetNames
## which contain columns with the std() and mean() observations, with activity and subject data.
MergeInfo <-subset(MergeInfo,select=subsetNames)
## MergeInfo subset is created yielding 10,299 rows and 68 columns (selected columns from subsetNames).
################################################################
## GIVE DESCRIPTIVE ACTIVITY AND COLUMN NAMES TO DATA SET ##
################################################################
## Load the activity names from activity_lables.txt file.
activityNames <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
## Factorize the activity column in MergeInfo - converts the activity code number to an activity description.
MergeInfo$activity<- factor(MergeInfo$activity,labels=as.character(activityNames$V2))
## Give descriptive lables to the MergeInfo data frame.
names(MergeInfo)<-gsub("^t", "time", names(MergeInfo))
names(MergeInfo)<-gsub("^f", "frequency", names(MergeInfo))
names(MergeInfo)<-gsub("Acc", "Accelerometer", names(MergeInfo))
names(MergeInfo)<-gsub("Gyro", "Gyroscope", names(MergeInfo))
names(MergeInfo)<-gsub("Mag", "Magnitude", names(MergeInfo))
names(MergeInfo)<-gsub("BodyBody", "Body", names(MergeInfo))
################################################
## Write MergeInfo to the MergeInfo.csv file ##
################################################
# Write the MergeInfo to a CSV file.
write.table(MergeInfo, file = "MergeInfo.csv",row.name=FALSE, sep = ",")
######################################
## CREATE SECOND TIDY DATA SET ##
######################################
## Average each accelerometer variable for each activity and each subject/ user.
MergeInfo2 <-aggregate(. ~subject + activity, MergeInfo, mean)
## Creates a tidy set of 180 rows and 68 columns
## Order in ascending fashion the data data set by the subject column first and activity column second.
MergeInfo2 <-MergeInfo2[order(MergeInfo2$subject,MergeInfo2$activity),]
## Write MergeInfo2 to a text file.
write.table(MergeInfo2, file = "MergeInfoTidyData.txt",row.name=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_bettis.R
\name{surface_nshpere}
\alias{surface_nshpere}
\title{Calculate surface area of an n-dimensional sphere}
\usage{
surface_nshpere(n, r = 1)
}
\arguments{
\item{n}{dimension of the sphere}
\item{r}{radius of the sphere}
}
\value{
surface area of a n-dimensional sphere
}
\description{
Calculate surface area of an n-dimensional sphere
}
| /man/surface_nshpere.Rd | permissive | jetstreamokayasu/usephacm | R | false | true | 429 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_bettis.R
\name{surface_nshpere}
\alias{surface_nshpere}
\title{Calculate surface area of a n-dimensional sphere}
\usage{
surface_nshpere(n, r = 1)
}
\arguments{
\item{n}{dimension of the sphere}
\item{r}{radius of the sphere}
}
\value{
surface area of a n-dimensional sphere
}
\description{
Calculate surface area of a n-dimensional sphere
}
|
# Extracted example for LS2Wsim.cddews from the LS2W package.
library(LS2W)
### Name: LS2Wsim.cddews
### Title: Simulate an LS2W process with underlying Daubechies wavelet.
### Aliases: LS2Wsim.cddews
### Keywords: datagen
### ** Examples
# Generate an empty 64x64 cddews spectrum (zero power everywhere).
Spectrum<-cddews(matrix(0,64,64),smooth=FALSE)
# Add unit power at the first scale, in the vertical direction.
Spectrum$S[1,,]<-matrix(1,64,64)
# Simulate an LS2W process with this spectral structure.
testimage<- LS2Wsim(Spectrum)
| /data/genthat_extracted_code/LS2W/examples/LS2Wsim.cddews.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 510 | r | library(LS2W)
### Name: LS2Wsim.cddews
### Title: Simulate an LS2W process with underlying Daubechies wavelet.
### Aliases: LS2Wsim.cddews
### Keywords: datagen
### ** Examples
#Generate an empty spectrum
#
Spectrum<-cddews(matrix(0,64,64),smooth=FALSE)
#
#Add power at the first scale, in the vertical direction
#
Spectrum$S[1,,]<-matrix(1,64,64)
#
# Simulate an LS2W process with this structure
#
testimage<- LS2Wsim(Spectrum)
#
|
# MBTI personality-type text analysis: build numeric features from the
# Kaggle MBTI dataset (mbti_1.csv: columns 'type' and 'posts').
setwd("C:/Users/faaez/OneDrive - Central European University/Current Courses/NLP")
library(tidyverse)
library(stringr)
library(tidytext)
library(textstem)
library(spacyr)
library(textdata)
library(h2o)
library(skimr)
df <- read_csv("mbti_1.csv")
# Build one row of numeric features per user (id): formality score,
# NRC emotion shares, average word length, VAD averages and mean tf-idf.
# Expects a data frame with 'type' and 'posts' columns (as in mbti_1.csv).
make_features <- function(df) {
# Strip subreddit references and URLs, collapse separator tokens,
# then lemmatize each post.
cleantext <- function(text){
text <- gsub("/r/[0-9A-Za-z]", "", text)
text <- gsub('(http[^ ]*)|(www\\.[^ ]*)', "", text)
text <- gsub("[]|||[]", " ", text)
text <- lemmatize_strings(text)
return(text)
}
# TRUE if 'word' is one of the (lowercased) MBTI type labels.
# NOTE(review): defined but never called below.
check_type <- function(word){
if(word %in% MBTI_types){
return(TRUE)
} else {
return(FALSE)
}
}
# Lowercase a token and strip stray non-letters / email addresses.
cleanword <- function(word) {
word <- str_to_lower(word)
word <- str_replace(word, " [^A-Za-z]", "")
word <- str_replace(word, '\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b', "")
return(word)
}
# Map MBTI type labels to NA so they can be dropped (they would leak
# the target into the features); all other words pass through.
cleantypes <- function(word){
if(word %in% MBTI_types){
return(NA)
}
return(word)
}
MBTI_types <- c('INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP', 'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ','MBTI')
MBTI_types <- MBTI_types %>% map(str_to_lower)
# Clean each user's concatenated posts and assign a row-number id.
df$posts <- df$posts %>% map(cleantext)
df <- df %>% mutate(id = rownames(df))
# One row per token; "tweets" tokenizer keeps #/@-style tokens.
df1 <- df %>% unnest_tokens(word, posts, token = "tweets", to_lower = FALSE)
# NOTE(review): sample text left over from development; never used.
x <- "public speaking class a few years ago and Ive sort of learned what I could do better were I to be in that position again. A big part of my failure was just overloading myself with too... I like this persons mentality. He's a confirmed INTJ"
df2 <- df1
df2$word <- df2$word %>% map_chr(cleanword)
df2$word <- df2$word %>% map_chr(cleantypes)
# Drop rows whose word was an MBTI label (now NA).
df2 <- na.omit(df2)
df2
#regex for url = (?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+
#write_csv(df2, "words_list.csv")
# Remove stop words and attach a part-of-speech tag per word,
# defaulting unknown words to "Noun" and collapsing POS variants.
data("stop_words")
stop_words
df2 <- df2 %>% anti_join(stop_words, by = "word")
df2 <- df2 %>% left_join(parts_of_speech, by = "word")
df2 <- df2 %>% mutate(pos = ifelse(is.na(pos), "Noun", pos))
df2 <- distinct(df2 %>% mutate(pos = ifelse(str_detect(pos, "Noun"), "Noun",
ifelse(str_detect(pos, "Adverb"), "Adver",
ifelse(str_detect(pos, "Verb"), "Verb",
ifelse(str_detect(pos, "Article"), "Article", pos))))))
# Per-user POS counts and totals, then a formality ("F") score:
# (nouns+adjectives+prepositions+articles) minus
# (pronouns+verbs+adverbs+interjections), each as a share of tokens, halved.
df_count <- distinct(df2 %>% group_by(id, pos) %>% transmute(type, count = n()))
df_count_total <- distinct(df2 %>% group_by(id) %>% transmute(total_count = n()))
unique(df2$pos)
df_for <- spread(df_count, pos, count, fill = 0)
df_for <- merge(df_count_total, df_for) %>% rowwise() %>% mutate(for_score = 0.5*sum(Noun,Adjective,Preposition,Article)/total_count - 0.5*sum(Pronoun,Verb,Adver,Interjection)/total_count)
# Per-user tf-idf (documents are users) plus a quick inspection plot.
df_idf1 <- df2 %>% count(id, word, sort = TRUE)
df_idf2 <- df_idf1 %>% group_by(id) %>% summarise(total = sum(n))
df_idf <- df_idf1 %>%
bind_tf_idf(word, id, n)
df_idf <- left_join(df_idf, df2 %>% transmute(id, type) %>% distinct())
library(forcats)
plot_physics <- df_idf %>%
mutate(word = fct_reorder(word, tf_idf)) %>%
mutate(type = as.factor(type))
plot_physics %>%
group_by(type) %>%
top_n(5, tf_idf) %>%
ungroup() %>%
mutate(word = reorder(word, tf_idf)) %>%
ggplot(aes(word, tf_idf, fill = type)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~type, ncol = 2, scales = "free") +
coord_flip()
# Collapse tf-idf to a single per-user average.
df_idf <- df_idf %>% group_by(id) %>% summarise(avg_tf_idf = mean(tf_idf))
# NRC emotion lexicon: per-user emotion counts normalized by word count;
# 'sentiment' is (positive - negative) share.
nrc <- get_sentiments("nrc")
df_nrc <- left_join(df2, nrc)
df_nrc1 <- na.omit(df_nrc) %>% group_by(id, sentiment) %>% transmute(count = n()) %>% distinct()
df_nrc1 <- spread(df_nrc1, sentiment, count, fill = 0)
df_nrc1 <- left_join(df_nrc1, df_idf2)
df_nrc1 <- df_nrc1 %>% transmute(anger = anger/total,
anticipation = anticipation/total,
disgust = disgust/total,
fear = fear/total,
joy = joy/total,
sentiment = (positive - negative)/total,
sadness = sadness/total,
surprise = surprise/total,
trust = trust/total)
df <- left_join(df_for, df_nrc1)
# Average word length (computed on the pre-cleaning token set df1) and
# NRC VAD (valence/arousal/dominance) averages per user.
df_wlen <- df1 %>% mutate(len_w = nchar(word)) %>% group_by(id) %>% transmute(avg_word_length = mean(len_w)) %>% distinct()
df_vad <- left_join(df2, lexicon_nrc_vad(), by = c("word" = "Word")) %>% na.omit() %>% transmute(id, Valence, Arousal, Dominance)
df_vad <- df_vad %>% group_by(id) %>% summarise(avg_valence = mean(Valence), avg_arousal = mean(Arousal), avg_dominance = mean(Dominance))
# Join all feature tables and keep the final modelling columns.
df <- left_join(df, df_wlen)
df <- left_join(df, df_vad)
df <- left_join(df, df_idf)
df <- df %>% select(id, type, for_score, anger, anticipation, disgust, fear, joy, sentiment, sadness, surprise, trust, avg_word_length, avg_valence, avg_arousal, avg_dominance, avg_tf_idf) %>% mutate(type = as.factor(type))
return(df)
}
###########################
# Model the MBTI type from the engineered features with h2o:
# a multinomial ridge GLM and a GBM, compared by validation RMSE.
df_final <- df %>% select(!id)
h2o.init()
data <- as.h2o(df_final)
# 70/30 train/validation split with a fixed seed for reproducibility.
splitted_data <- h2o.splitFrame(data,
ratios = c(0.7),
seed = 123)
data_train <- splitted_data[[1]]
data_train$type <- as.factor(data_train$type)
data_valid <- splitted_data[[2]]
# NOTE(review): data_train is already an H2OFrame; this conversion is a no-op.
data_train <- as.h2o(data_train)
# Response and predictor columns.
y <- "type"
x <- setdiff(names(data_train), y)
# Ridge (alpha = 0) multinomial GLM with lambda search and 5-fold CV.
glm_model <- h2o.glm(
x, y,
training_frame = data_train,
family = "multinomial",
alpha = 0,
lambda_search = TRUE,
seed = 123,
nfolds = 5,
keep_cross_validation_predictions = TRUE # this is necessary to perform later stacking
)
# GBM with early stopping on RMSE; CV predictions kept for stacking.
gbm_model <- h2o.gbm(
x, y,
training_frame = data_train,
ntrees = 1500,
max_depth = 100,
learn_rate = 0.1,
seed = 123,
nfolds = 5,
distribution = "multinomial",
stopping_metric = "RMSE", stopping_rounds = 3,
stopping_tolerance = 1e-3,
keep_cross_validation_predictions = TRUE
)
# Compare the two models on the held-out validation frame.
print(h2o.rmse(h2o.performance(glm_model, newdata = data_valid)))
print(h2o.rmse(h2o.performance(gbm_model, newdata = data_valid)))
df_test <- data.frame("type" = "entj", "posts" = "Social media today is the single most powerful source of information and disinformation around the world. With that the availability of an online pulpit, the availability of ideology, the availability of people supporting it, there is an increase in the propensity and proclivity of certain behaviours to foster. The world saw an increase in hate crimes, and communal violence in recent years - more so in countries where the populist leaders openly gratify extremist ideas. In terms of relevance to Pakistan, Safoora Goth and Sabeen Mahmud's assassination are linked to educated youth, entailing the process of self-radicalisation through social media.
Also, because of the availability of all kinds of propaganda, the digital environment is becoming conducive to augmented revolutions, riots and uprisings. For some, this is petrifying and for others, it is stimulating. The dramatic ousting of President Hosni Mubarak in Egypt and the lawyers' movement in Pakistan are a few successful examples of social media mobilisation.
It has become easier to manipulate information on the internet, especially for governments looking to do this through surreptitious means - use of chat bots, keyboard armies, fake accounts to name a few. In the past, there were only hand-held anchors on mass media - but today - we have hand-held gizmos, or the mix of both to distort and shape public opinion. The pro-democracy think tank 'Freedom House' reported that manipulation tactics in elections were found in 18 countries including the US in past one year.
Considering that the social media is being weaponised around the world, the governments are extensively looking to stifle opinions. This is particularly arduous as there is no agreed upon distinction on where to draw the line.
The opinion makers around the world are fighting over the civil liberties in terms of 'how much is too much' when it comes to allowing dissent and critique - from Charlie Hebdo in France, to blasphemy cases in Pakistan. Some argue that one's liberty to swing fist ends just where other's nose begins. Others argue that nobody can discuss polemics without offending anyone. And the moderates argue that one should learn the art to discuss controversial topics - it's not about what you say, it's about how you say it. So, by and large, there is no clarity in this debate.
Albeit, hate speech is something that translates into instigation against a person or group on the basis of attributes such as race, religion, ethnic origin, sexual orientation, disability, or gender. In several scenarios, it can translate into instigating people to kill someone without any obvious reason. In Pakistan, there are several laws like PPC-153-A - Promoting enmity between different groups, etc. - and in newly drafted cybercrimes law that goes by the title Prevention of Electronic Crimes Act 2016 (PECA) which criminalise hate speech. But fatwas calling people traitors and blasphemous are commonly hurled. Sometimes, it is used as a tool to stifle opinions and critiques against the state institutions on social media.
svg%3E
Whilst it is a dying notion that civil liberties are cemented across the world, it is 'heads I lose, tails you win' kind of situation for activists and journalists in countries like Pakistan. From activists and bloggers being abducted to journalists being killed, the situation looks horrendous. Social media is the only place where the people used to freely create vain echo chambers to critique the government's actions. But they cannot do it without the fear of getting abducted or killed. The recently passed controversial cybercrimes law can be used to curtail dissent in the digital space as indicated by the interior minister Ahsan Iqbal. It is a slippery slope because of lack of accountability in such cases. While the state should be looking to curtail hate speech, it is looking to snub sane voices in the country.
Social media has transpired into a celebrated tool to delve in all sorts of political commentary, it has now become, more often than not, the only avenue for the mainstream populist narrative
The fact that social media has transpired into a celebrated tool to delve in all sorts of political commentary, it has now become, more often than not, the only tool to mainstream populist narrative. The make-or-break deal comes forth based on where the contradictory argument of the proposed ideology rests. Political leaders who once relied on door-to-door knocks for constituency now pitch - at least a part of - to their electoral proposals online. The former US president Barack Obama had a budget of $16 million to campaign on the internet. His election campaign focused on at least 15 different social media platforms along with other hit-hard online strategies indicating that it worked better than the Republican candidate John McCain's campaign.
While Obama may not be a populist leader, the now-elected president Donald Trump gained popular vote through social media. The art of saying what the majority wants to hear is what a populist leader needs to have supremacy in and Trump knew it better than his rivals in the wake of religious and political turmoil in the United States. Because populist ideas don't necessarily have to be ethical or in accordance with the humanitarian beliefs, Trump's constant badgering by great speeches and reinforcement of the campaign slogan while persecuting minorities on multiple media outlets was what worked in his favour.
In the context of Pakistan, Imran Khan, before the General Elections of 2013, successfully garnered support by communicating with particularly young citizens online about the injustices that they have been subjected to under Asif Ali Zardari's presidency. The vigilantes backed by Pakistan Tehreek-e-Insaf (PTI) actively took over every political debate online accounting for the populist ideology to thrive which was further evident by the 3-month long Azadi March in Islamabad and multiple successful rallies across Pakistan.
While Imran Khan's populist rhetoric was aimed at strengthening his own power by constant claims of rigging in the General Elections of 2013, the cry for removing the then Prime Minister Nawaz Sharif and subsequently demanding re-elections, Pakistan's founding pillars are particularly sentient of religious sentiments - a country that came into being to grant religious freedom to its occupants now rejects all beliefs except Islam. As religious monopoly is a populist expression of the Muslim majority, the political parties routinely target this sentiment to accumulate support, for example the recent protest by Tehreek-e-Labbaik Pakistan (TLP) to challenge the proposed amendments in the Elections Bill 2017.
Although the protest didn't stem from social media, it gained momentum online because of the populist idea that it was based on - Ahmadi persecution. It was noticed that the Twitter followers of the leader of TLP, Khaadim Hussain Rizvi, joined Twitter after the protest was mainstreamed, between October and November 2017. It was as though the protesters had found gold at the end of the rainbow and broadcasted their strength via Facebook live and direct online communication with people with the help of the large support that they have garnered in a matter of days, furthering the fact that Islam is the only way forward in Pakistan.
Populism was once largely believed to be instrumental to the social class and was initially introduced to contradict elite rulers, but in the present form it has been translated into victimising the minorities - religious, social, cultural, political et al. Because the belief suits the sentiments of the majority, the demands stemming from it are taken at their face value while absolutely disregarding the minority. Pakistanis have particularly witnessed, and to rightly say that they have supported, multiple genocides against religious minorities on different instances because the ideologies satisfied the mostly Sunni majority. From the persecution of Hazara people and Shia genocide to Ahmedi killings to routinely Hindu and Christian persecution, "you're free to go to your mosques, temples, and churches" by Jinnah isn't valid in the land of pure anymore. Seventy years of Pakistan's history is evidence enough to indicate that conforming to the demands of the populists never plays out well in the favour of the already marginalised groups of society.
While pure populism is widely critiqued in the political context for its poor chances of success, populist movements have occasionally been ethical in principle where the sole purpose of initiating the movement was to acknowledge minority rights or the rights of those who live on the margins. For example, Podemos, the populist Spanish party, is advocating to grant voting rights to immigrants; the regulations around same sex marriages have received favourable popular votes in the countries where it's legal now.
The role of social media has been remarkable in the modern day left-wing populism. Women's March that took place earlier this year in the United States the same day Donald Trump took his oath as the president was solely organised through Facebook. While the narrative wasn't entirely populist, it gained popularity online and gathered millions of women from around the world against something they believed was worth fighting for - indicating towards the potential of online media to initiate social change.
Online media has the power to mould opinions and reinforce ideologies as its occupants deem fit. Adding populism to the evolving digital dynamics empowers people to be the apologists of extremist rhetoric, and it also has the potential of popularising debate around minority rights in conventional conversations. Whilst governments around the world use the internet to advance their ideologies in the modern days, yet it's one of the biggest threats to their authoritarianism resulting in the routinely attempts to stifle dissent on these mediums through disproportionate measures like internet shutdowns - partial and absolute, forced abductions of sane voices against injustices of oligarchy, targeting the religious sentiments of the masses to obscure minority discourse, and restraining free speech on account of it being hate speech or against national security.")
# Build features for the held-out sample defined above and drop the
# non-predictor columns before scoring.
t <- make_features(df_test) %>% select(!c("type", "id"))
t <- as.h2o(t)
# Predicted MBTI class (plus per-class probabilities) from the GBM.
as.tibble(h2o.predict(gbm_model, t))
| /CEU MS/NLP/proj.R | no_license | faaez1riaz/CourseWork | R | false | false | 16,874 | r | setwd("C:/Users/faaez/OneDrive - Central European University/Current Courses/NLP")
# Dependencies: data wrangling (tidyverse/stringr), tokenization and
# sentiment lexicons (tidytext/textdata), lemmatization (textstem),
# NLP tooling (spacyr), modeling (h2o), and summaries (skimr).
library(tidyverse)
library(stringr)
library(tidytext)
library(textstem)
library(spacyr)
library(textdata)
library(h2o)
library(skimr)
# Raw Kaggle MBTI data set: one row per author with columns `type`
# (16-level MBTI label) and `posts` (pipe-delimited forum posts).
df <- read_csv("mbti_1.csv")
# make_features(): turn a data frame of raw MBTI posts (columns `type`,
# `posts`) into one row of numeric features per document: part-of-speech
# "formality" score, NRC sentiment ratios, average word length, NRC-VAD
# valence/arousal/dominance, and mean tf-idf. Relies on tidyverse,
# tidytext, textstem and textdata being attached by the enclosing script.
make_features <- function(df) {
  # Strip subreddit mentions and URLs, collapse the pipe separators used
  # to delimit posts in the raw data, then lemmatize the remaining text.
  cleantext <- function(text){
    text <- gsub("/r/[0-9A-Za-z]", "", text)
    text <- gsub('(http[^ ]*)|(www\\.[^ ]*)', "", text)
    text <- gsub("[]|||[]", " ", text)
    text <- lemmatize_strings(text)
    return(text)
  }
  # TRUE when `word` is one of the lower-cased MBTI type labels.
  # NOTE(review): defined but never called below — candidate for removal.
  check_type <- function(word){
    if(word %in% MBTI_types){
      return(TRUE)
    } else {
      return(FALSE)
    }
  }
  # Lower-case a token and strip a stray non-letter suffix and any
  # embedded e-mail address.
  cleanword <- function(word) {
    word <- str_to_lower(word)
    word <- str_replace(word, " [^A-Za-z]", "")
    word <- str_replace(word, '\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b', "")
    return(word)
  }
  # Map MBTI type labels to NA so self-referential mentions of the target
  # classes can be dropped with na.omit() below.
  cleantypes <- function(word){
    if(word %in% MBTI_types){
      return(NA)
    }
    return(word)
  }
  # All 16 MBTI labels plus the literal token "MBTI", lower-cased.
  MBTI_types <- c('INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP', 'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ','MBTI')
  MBTI_types <- MBTI_types %>% map(str_to_lower)
  # Clean every document and assign a stable per-row id for later joins.
  df$posts <- df$posts %>% map(cleantext)
  df <- df %>% mutate(id = rownames(df))
  # One row per token; the "tweets" tokenizer keeps #/@ tokens intact.
  df1 <- df %>% unnest_tokens(word, posts, token = "tweets", to_lower = FALSE)
  # NOTE(review): `x` appears unused below — looks like a leftover
  # scratch example from development.
  x <- "public speaking class a few years ago and Ive sort of learned what I could do better were I to be in that position again. A big part of my failure was just overloading myself with too... I like this persons mentality. He's a confirmed INTJ"
  df2 <- df1
  # Normalize tokens, drop MBTI label tokens (via NA), then drop NA rows.
  df2$word <- df2$word %>% map_chr(cleanword)
  df2$word <- df2$word %>% map_chr(cleantypes)
  df2 <- na.omit(df2)
  df2
  #regex for url = (?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+
  #write_csv(df2, "words_list.csv")
  # Remove stop words, then attach part-of-speech tags; untagged words
  # default to "Noun".
  data("stop_words")
  stop_words
  df2 <- df2 %>% anti_join(stop_words, by = "word")
  df2 <- df2 %>% left_join(parts_of_speech, by = "word")
  df2 <- df2 %>% mutate(pos = ifelse(is.na(pos), "Noun", pos))
  # Collapse fine-grained POS tags into coarse buckets ("Adver" spelling
  # is kept because the for_score formula below references it).
  df2 <- distinct(df2 %>% mutate(pos = ifelse(str_detect(pos, "Noun"), "Noun", 
                                     ifelse(str_detect(pos, "Adverb"), "Adver",
                                            ifelse(str_detect(pos, "Verb"), "Verb",
                                                   ifelse(str_detect(pos, "Article"), "Article", pos))))))
  # Per-document POS counts and total token counts.
  df_count <- distinct(df2 %>% group_by(id, pos) %>% transmute(type, count = n()))
  df_count_total <- distinct(df2 %>% group_by(id) %>% transmute(total_count = n()))
  unique(df2$pos)
  # Wide POS table, then a formality-style score: (noun-like share) minus
  # (verb-like share), each weighted 0.5.
  df_for <- spread(df_count, pos, count, fill = 0)
  df_for <- merge(df_count_total, df_for) %>% rowwise() %>% mutate(for_score = 0.5*sum(Noun,Adjective,Preposition,Article)/total_count - 0.5*sum(Pronoun,Verb,Adver,Interjection)/total_count)
  # tf-idf per (document, word), treating each document id as the "corpus
  # document" unit.
  df_idf1 <- df2 %>% count(id, word, sort = TRUE)
  df_idf2 <- df_idf1 %>% group_by(id) %>% summarise(total = sum(n))
  df_idf <- df_idf1 %>%
    bind_tf_idf(word, id, n)
  df_idf <- left_join(df_idf, df2 %>% transmute(id, type) %>% distinct())
  library(forcats)
  # Exploratory plot: top-5 tf-idf words per MBTI type (side effect only;
  # the plot object is not returned).
  plot_physics <- df_idf %>%
    mutate(word = fct_reorder(word, tf_idf)) %>%
    mutate(type = as.factor(type))
  plot_physics %>% 
    group_by(type) %>% 
    top_n(5, tf_idf) %>% 
    ungroup() %>%
    mutate(word = reorder(word, tf_idf)) %>%
    ggplot(aes(word, tf_idf, fill = type)) +
    geom_col(show.legend = FALSE) +
    labs(x = NULL, y = "tf-idf") +
    facet_wrap(~type, ncol = 2, scales = "free") +
    coord_flip()
  # Collapse tf-idf to a per-document mean.
  df_idf <- df_idf %>% group_by(id) %>% summarise(avg_tf_idf = mean(tf_idf))
  # NRC emotion lexicon: per-document counts for each emotion, normalized
  # by the document's token count; `sentiment` is net (positive-negative).
  nrc <- get_sentiments("nrc")
  df_nrc <- left_join(df2, nrc)
  df_nrc1 <- na.omit(df_nrc) %>% group_by(id, sentiment) %>% transmute(count = n()) %>% distinct()
  df_nrc1 <- spread(df_nrc1, sentiment, count, fill = 0)
  df_nrc1 <- left_join(df_nrc1, df_idf2)
  df_nrc1 <- df_nrc1 %>% transmute(anger = anger/total,
                      anticipation = anticipation/total,
                      disgust = disgust/total,
                      fear = fear/total,
                      joy = joy/total,
                      sentiment = (positive - negative)/total,
                      sadness = sadness/total,
                      surprise = surprise/total,
                      trust = trust/total)
  # Combine the POS score with the sentiment ratios.
  df <- left_join(df_for, df_nrc1)
  # Average word length is computed from the *uncleaned* token stream df1.
  df_wlen <- df1 %>% mutate(len_w = nchar(word)) %>% group_by(id) %>% transmute(avg_word_length = mean(len_w)) %>% distinct()
  # NRC-VAD lexicon: mean valence/arousal/dominance per document.
  df_vad <- left_join(df2, lexicon_nrc_vad(), by = c("word" = "Word")) %>% na.omit() %>% transmute(id, Valence, Arousal, Dominance)
  df_vad <- df_vad %>% group_by(id) %>% summarise(avg_valence = mean(Valence), avg_arousal = mean(Arousal), avg_dominance = mean(Dominance))
  # Assemble the final one-row-per-document feature table.
  df <- left_join(df, df_wlen)
  df <- left_join(df, df_vad)
  df <- left_join(df, df_idf)
  df <- df %>% select(id, type, for_score, anger, anticipation, disgust, fear, joy, sentiment, sadness, surprise, trust, avg_word_length, avg_valence, avg_arousal, avg_dominance, avg_tf_idf) %>% mutate(type = as.factor(type))
  return(df)
}
###########################
# Drop the per-document id before modeling; only the numeric features and
# the `type` target remain.
df_final <- df %>% select(!id)
# Start (or connect to) a local H2O cluster.
h2o.init()
# Push the feature table into H2O memory.
data <- as.h2o(df_final)
# 70/30 train/validation split; fixed seed for reproducibility.
splitted_data <- h2o.splitFrame(data, 
                              ratios = c(0.7), 
                              seed = 123)
data_train <- splitted_data[[1]]
# The response must be a factor for multinomial classification.
data_train$type <- as.factor(data_train$type)
data_valid <- splitted_data[[2]]
data_train <- as.h2o(data_train)
# Response column and predictor set (every column except the target).
y <- "type"
x <- setdiff(names(data_train), y)
# Ridge-penalized multinomial logistic regression (alpha = 0) with an
# automatic lambda search and 5-fold cross-validation.
glm_model <- h2o.glm(
  x, y,
  training_frame = data_train,
  family = "multinomial",
  alpha = 0, 
  lambda_search = TRUE,
  seed = 123,
  nfolds = 5, 
  keep_cross_validation_predictions = TRUE # this is necessary to perform later stacking
)
# Gradient boosting machine; early stopping on RMSE (3 rounds, 1e-3 tolerance).
gbm_model <- h2o.gbm(
  x, y,
  training_frame = data_train,
  ntrees = 1500, 
  max_depth = 100, 
  learn_rate = 0.1,
  seed = 123,
  nfolds = 5,
  distribution = "multinomial",
  stopping_metric = "RMSE", stopping_rounds = 3, 
  stopping_tolerance = 1e-3,
  keep_cross_validation_predictions = TRUE
)
# Validation-set RMSE for each model.
print(h2o.rmse(h2o.performance(glm_model, newdata = data_valid)))
print(h2o.rmse(h2o.performance(gbm_model, newdata = data_valid)))
df_test <- data.frame("type" = "entj", "posts" = "Social media today is the single most powerful source of information and disinformation around the world. With that the availability of an online pulpit, the availability of ideology, the availability of people supporting it, there is an increase in the propensity and proclivity of certain behaviours to foster. The world saw an increase in hate crimes, and communal violence in recent years - more so in countries where the populist leaders openly gratify extremist ideas. In terms of relevance to Pakistan, Safoora Goth and Sabeen Mahmud's assassination are linked to educated youth, entailing the process of self-radicalisation through social media.
Also, because of the availability of all kinds of propaganda, the digital environment is becoming conducive to augmented revolutions, riots and uprisings. For some, this is petrifying and for others, it is stimulating. The dramatic ousting of President Hosni Mubarak in Egypt and the lawyers' movement in Pakistan are a few successful examples of social media mobilisation.
It has become easier to manipulate information on the internet, especially for governments looking to do this through surreptitious means - use of chat bots, keyboard armies, fake accounts to name a few. In the past, there were only hand-held anchors on mass media - but today - we have hand-held gizmos, or the mix of both to distort and shape public opinion. The pro-democracy think tank 'Freedom House' reported that manipulation tactics in elections were found in 18 countries including the US in past one year.
Considering that the social media is being weaponised around the world, the governments are extensively looking to stifle opinions. This is particularly arduous as there is no agreed upon distinction on where to draw the line.
The opinion makers around the world are fighting over the civil liberties in terms of 'how much is too much' when it comes to allowing dissent and critique - from Charlie Hebdo in France, to blasphemy cases in Pakistan. Some argue that one's liberty to swing fist ends just where other's nose begins. Others argue that nobody can discuss polemics without offending anyone. And the moderates argue that one should learn the art to discuss controversial topics - it's not about what you say, it's about how you say it. So, by and large, there is no clarity in this debate.
Albeit, hate speech is something that translates into instigation against a person or group on the basis of attributes such as race, religion, ethnic origin, sexual orientation, disability, or gender. In several scenarios, it can translate into instigating people to kill someone without any obvious reason. In Pakistan, there are several laws like PPC-153-A - Promoting enmity between different groups, etc. - and in newly drafted cybercrimes law that goes by the title Prevention of Electronic Crimes Act 2016 (PECA) which criminalise hate speech. But fatwas calling people traitors and blasphemous are commonly hurled. Sometimes, it is used as a tool to stifle opinions and critiques against the state institutions on social media.
svg%3E
Whilst it is a dying notion that civil liberties are cemented across the world, it is 'heads I lose, tails you win' kind of situation for activists and journalists in countries like Pakistan. From activists and bloggers being abducted to journalists being killed, the situation looks horrendous. Social media is the only place where the people used to freely create vain echo chambers to critique the government's actions. But they cannot do it without the fear of getting abducted or killed. The recently passed controversial cybercrimes law can be used to curtail dissent in the digital space as indicated by the interior minister Ahsan Iqbal. It is a slippery slope because of lack of accountability in such cases. While the state should be looking to curtail hate speech, it is looking to snub sane voices in the country.
Social media has transpired into a celebrated tool to delve in all sorts of political commentary, it has now become, more often than not, the only avenue for the mainstream populist narrative
The fact that social media has transpired into a celebrated tool to delve in all sorts of political commentary, it has now become, more often than not, the only tool to mainstream populist narrative. The make-or-break deal comes forth based on where the contradictory argument of the proposed ideology rests. Political leaders who once relied on door-to-door knocks for constituency now pitch - at least a part of - to their electoral proposals online. The former US president Barack Obama had a budget of $16 million to campaign on the internet. His election campaign focused on at least 15 different social media platforms along with other hit-hard online strategies indicating that it worked better than the Republican candidate John McCain's campaign.
While Obama may not be a populist leader, the now-elected president Donald Trump gained popular vote through social media. The art of saying what the majority wants to hear is what a populist leader needs to have supremacy in and Trump knew it better than his rivals in the wake of religious and political turmoil in the United States. Because populist ideas don't necessarily have to be ethical or in accordance with the humanitarian beliefs, Trump's constant badgering by great speeches and reinforcement of the campaign slogan while persecuting minorities on multiple media outlets was what worked in his favour.
In the context of Pakistan, Imran Khan, before the General Elections of 2013, successfully garnered support by communicating with particularly young citizens online about the injustices that they have been subjected to under Asif Ali Zardari's presidency. The vigilantes backed by Pakistan Tehreek-e-Insaf (PTI) actively took over every political debate online accounting for the populist ideology to thrive which was further evident by the 3-month long Azadi March in Islamabad and multiple successful rallies across Pakistan.
While Imran Khan's populist rhetoric was aimed at strengthening his own power by constant claims of rigging in the General Elections of 2013, the cry for removing the then Prime Minister Nawaz Sharif and subsequently demanding re-elections, Pakistan's founding pillars are particularly sentient of religious sentiments - a country that came into being to grant religious freedom to its occupants now rejects all beliefs except Islam. As religious monopoly is a populist expression of the Muslim majority, the political parties routinely target this sentiment to accumulate support, for example the recent protest by Tehreek-e-Labbaik Pakistan (TLP) to challenge the proposed amendments in the Elections Bill 2017.
Although the protest didn't stem from social media, it gained momentum online because of the populist idea that it was based on - Ahmadi persecution. It was noticed that the Twitter followers of the leader of TLP, Khaadim Hussain Rizvi, joined Twitter after the protest was mainstreamed, between October and November 2017. It was as though the protesters had found gold at the end of the rainbow and broadcasted their strength via Facebook live and direct online communication with people with the help of the large support that they have garnered in a matter of days, furthering the fact that Islam is the only way forward in Pakistan.
Populism was once largely believed to be instrumental to the social class and was initially introduced to contradict elite rulers, but in the present form it has been translated into victimising the minorities - religious, social, cultural, political et al. Because the belief suits the sentiments of the majority, the demands stemming from it are taken at their face value while absolutely disregarding the minority. Pakistanis have particularly witnessed, and to rightly say that they have supported, multiple genocides against religious minorities on different instances because the ideologies satisfied the mostly Sunni majority. From the persecution of Hazara people and Shia genocide to Ahmedi killings to routinely Hindu and Christian persecution, "you're free to go to your mosques, temples, and churches" by Jinnah isn't valid in the land of pure anymore. Seventy years of Pakistan's history is evidence enough to indicate that conforming to the demands of the populists never plays out well in the favour of the already marginalised groups of society.
While pure populism is widely critiqued in the political context for its poor chances of success, populist movements have occasionally been ethical in principle where the sole purpose of initiating the movement was to acknowledge minority rights or the rights of those who live on the margins. For example, Podemos, the populist Spanish party, is advocating to grant voting rights to immigrants; the regulations around same sex marriages have received favourable popular votes in the countries where it's legal now.
The role of social media has been remarkable in the modern day left-wing populism. Women's March that took place earlier this year in the United States the same day Donald Trump took his oath as the president was solely organised through Facebook. While the narrative wasn't entirely populist, it gained popularity online and gathered millions of women from around the world against something they believed was worth fighting for - indicating towards the potential of online media to initiate social change.
Online media has the power to mould opinions and reinforce ideologies as its occupants deem fit. Adding populism to the evolving digital dynamics empowers people to be the apologists of extremist rhetoric, and it also has the potential of popularising debate around minority rights in conventional conversations. Whilst governments around the world use the internet to advance their ideologies in the modern days, yet it's one of the biggest threats to their authoritarianism resulting in the routinely attempts to stifle dissent on these mediums through disproportionate measures like internet shutdowns - partial and absolute, forced abductions of sane voices against injustices of oligarchy, targeting the religious sentiments of the masses to obscure minority discourse, and restraining free speech on account of it being hate speech or against national security.")
# Build features for the held-out sample defined above and drop the
# non-predictor columns before scoring.
t <- make_features(df_test) %>% select(!c("type", "id"))
t <- as.h2o(t)
# Predicted MBTI class (plus per-class probabilities) from the GBM.
as.tibble(h2o.predict(gbm_model, t))
|
#' Prepare input data for analysis after defining refseq, running read.alignment.file and set.tf if needed
#'
#' @keywords internal
prep.aln <- function(S) {

    ## Validate input: inherits() is robust when an object carries more
    ## than one class, unlike comparing class(S) with `!=` (which warns or
    ## errors on a length-> 1 condition in modern R).
    if (!inherits(S, "swarmtools"))
        stop("ERROR: Please pass a swarmtools object to prep.aln()")

    ## Lazily read the amino-acid alignment when a file path was supplied
    ## but no alignment has been loaded yet.
    if (is.null(S$aas_aln) && !is.null(S$aas_file))
        if (file.exists(S$aas_file))
            S$aas_aln <- read.alignment.file(S$aas_file, S$aas_prefix, S$alignment_format)

    ### NB: refseq is used to standardize site numbering (e.g. HXB2),
    ### not for TF loss, which is specified by tf_index or tf_name.
    ## Prefer an explicit lookup-table file; otherwise derive the
    ## reference sequence when none has been set yet.
    if (!is.null(S$refseq_lut_file) && is.null(S$refseq_lut)) {
        S$refseq_lut <- create.refseq.lut.from.file(S$refseq_lut_file)
    } else if (is.null(S$refseq_lut)) {
        S <- set.refseq(S)
    }

    if (!is.null(S$aas_aln)) {
        ## Preserve the original sequence names and parse per-sequence
        ## sampling timepoints from them.
        S$original_seqnames <- rownames(S$aas_aln)
        S$timepoint_per_sequence <- parse.timepoints(rownames(S$aas_aln),
            uniquify=FALSE, timepoints_parser=S$timepoints_parser)

        ## Count sequences sampled per timepoint (sorted by timepoint name).
        S$n_per_timepoint <- table(sort(S$timepoint_per_sequence))

        ## Identify the transmitted/founder sequence.
        S <- set.tf(S)
    }
    S
}
| /R/prep.aln.R | no_license | eclarke/lassie | R | false | false | 1,682 | r | #' Prepare input data for analysis after defining refseq, running read.alignment.file and set.tf if needed
#'
#' @keywords internal
prep.aln <- function(S) {

    ## Validate input: inherits() is robust when an object carries more
    ## than one class, unlike comparing class(S) with `!=` (which warns or
    ## errors on a length-> 1 condition in modern R).
    if (!inherits(S, "swarmtools"))
        stop("ERROR: Please pass a swarmtools object to prep.aln()")

    ## Lazily read the amino-acid alignment when a file path was supplied
    ## but no alignment has been loaded yet.
    if (is.null(S$aas_aln) && !is.null(S$aas_file))
        if (file.exists(S$aas_file))
            S$aas_aln <- read.alignment.file(S$aas_file, S$aas_prefix, S$alignment_format)

    ### NB: refseq is used to standardize site numbering (e.g. HXB2),
    ### not for TF loss, which is specified by tf_index or tf_name.
    ## Prefer an explicit lookup-table file; otherwise derive the
    ## reference sequence when none has been set yet.
    if (!is.null(S$refseq_lut_file) && is.null(S$refseq_lut)) {
        S$refseq_lut <- create.refseq.lut.from.file(S$refseq_lut_file)
    } else if (is.null(S$refseq_lut)) {
        S <- set.refseq(S)
    }

    if (!is.null(S$aas_aln)) {
        ## Preserve the original sequence names and parse per-sequence
        ## sampling timepoints from them.
        S$original_seqnames <- rownames(S$aas_aln)
        S$timepoint_per_sequence <- parse.timepoints(rownames(S$aas_aln),
            uniquify=FALSE, timepoints_parser=S$timepoints_parser)

        ## Count sequences sampled per timepoint (sorted by timepoint name).
        S$n_per_timepoint <- table(sort(S$timepoint_per_sequence))

        ## Identify the transmitted/founder sequence.
        S <- set.tf(S)
    }
    S
}
|
# TODO: Add comment
#
# Author: furia
###############################################################################
setMethod(
    f = "getEntityInstance",
    signature = signature("list"),
    definition = function(entity)
    {
        ## Map the Synapse entity type string to an R class name;
        ## SynapseEntity is the fallback when no mapping exists.
        entityClass <- getClassFromSynapseEntityType(entity$entityType)
        if (is.null(entityClass))
            entityClass <- "SynapseEntity"

        ## Plain entities that carry locations are promoted to full
        ## location owners so their files can be cached locally.
        if (entityClass == "SynapseEntity" && !is.null(entity$locations) && length(entity$locations) > 0)
            entityClass <- "SynapseLocationOwnerWithObjects"

        ## Dispatch to the appropriate constructor, handing it the list
        ## representation of the entity, then attach the web URL.
        ee <- do.call(entityClass, list(entity = entity))
        ee@synapseWebUrl <- .buildSynapseUrl(propertyValue(ee, "id"))

        if (inherits(ee, "SynapseLocationOwner")) {
            url <- ee$properties$locations[[1]][['path']]
            if (!is.null(url)) {
                ## Derive the on-disk cache root from the first location URL.
                parsedUrl <- .ParsedUrl(url)
                destfile <- path.expand(file.path(synapseCacheDir(), gsub("^/", "", parsedUrl@path)))
                cacheRoot <- dirname(destfile)
            } else if (!is.null(ee$properties$id)) {
                ## No location URL: fall back to an entity-specific temp dir.
                cacheRoot <- file.path(tempdir(), ee$properties$id)
            } else {
                ## Last resort: a shared temp dir.
                cacheRoot <- tempdir()
            }

            ## TODO: remove this block after fixing setCacheRoot
            if (!file.exists(cacheRoot))
                dir.create(cacheRoot, recursive = TRUE)
            cacheRoot <- gsub("[\\/]+", "/", normalizePath(cacheRoot))

            ## Reuse an existing file cache for this root when one is
            ## already registered; otherwise establish it.
            if (cacheRoot %in% synapseClient:::availFileCaches()) {
                ee@archOwn@fileCache <- getFileCache(cacheRoot)
            } else {
                ## TODO: fix this
                setCacheRoot(ee@archOwn, cacheRoot, clean = TRUE)
            }
        }
        ee
    }
)
setMethod(
    f = "initialzeEntity",
    signature = "SynapseEntity",
    definition = function(entity) {
        ## Base case of the initialization chain: a plain SynapseEntity
        ## needs no extra setup, so it is returned unchanged.
        ## NOTE(review): "initialzeEntity" is misspelled, but it is the
        ## generic's established name and must stay as-is.
        entity
    }
)
setMethod(
    f = "initialzeEntity",
    signature = "SynapseLocationOwner",
    definition = function(entity) {
        ## Run the parent-class initialization first.
        baseInit <- getMethod("initialzeEntity", "SynapseEntity")
        entity <- baseInit(entity)

        ## Entities without a location URL need no file cache.
        cacheUrl <- properties(entity)$locations[[1]][['path']]
        if (is.null(cacheUrl))
            return(entity)

        ## Map the location URL onto a directory under the Synapse cache.
        ## NOTE(review): single-bracket ['path'] here (vs [['path']] used
        ## elsewhere) returns a sub-list rather than the element — confirm
        ## this is intended.
        parsedUrl <- synapseClient:::.ParsedUrl(propertyValue(entity, 'locations')[[1]]['path'])
        cacheDir <- file.path(synapseCacheDir(), gsub("^/", "", parsedUrl@pathPrefix))
        if (!file.exists(cacheDir))
            dir.create(cacheDir, recursive = TRUE)
        cacheDir <- normalizePath(path.expand(cacheDir))

        ## Instantiate the file cache and store the reference on the
        ## archive owner.
        entity@archOwn@fileCache <- getFileCache(cacheDir)
        entity
    }
)
setMethod(
    f = "initialzeEntity",
    signature = "SynapseLocationOwnerWithObjects",
    definition = function(entity) {
        ## Initialize as a location owner first, then share the archive
        ## owner's file cache with the object owner so both use the same
        ## on-disk cache.
        locInit <- getMethod("initialzeEntity", "SynapseLocationOwner")
        entity <- locInit(entity)
        entity@objOwn$fileCache <- entity@archOwn@fileCache
        entity
    }
)
| /R/getEntityInstance.R | no_license | MattNapsAlot/rSynapseClient | R | false | false | 3,069 | r | # TODO: Add comment
#
# Author: furia
###############################################################################
setMethod(
    f = "getEntityInstance",
    signature = signature("list"),
    definition = function(entity)
    {
        ## Map the Synapse entity type string to an R class name;
        ## SynapseEntity is the fallback when no mapping exists.
        entityClass <- getClassFromSynapseEntityType(entity$entityType)
        if (is.null(entityClass))
            entityClass <- "SynapseEntity"

        ## Plain entities that carry locations are promoted to full
        ## location owners so their files can be cached locally.
        if (entityClass == "SynapseEntity" && !is.null(entity$locations) && length(entity$locations) > 0)
            entityClass <- "SynapseLocationOwnerWithObjects"

        ## Dispatch to the appropriate constructor, handing it the list
        ## representation of the entity, then attach the web URL.
        ee <- do.call(entityClass, list(entity = entity))
        ee@synapseWebUrl <- .buildSynapseUrl(propertyValue(ee, "id"))

        if (inherits(ee, "SynapseLocationOwner")) {
            url <- ee$properties$locations[[1]][['path']]
            if (!is.null(url)) {
                ## Derive the on-disk cache root from the first location URL.
                parsedUrl <- .ParsedUrl(url)
                destfile <- path.expand(file.path(synapseCacheDir(), gsub("^/", "", parsedUrl@path)))
                cacheRoot <- dirname(destfile)
            } else if (!is.null(ee$properties$id)) {
                ## No location URL: fall back to an entity-specific temp dir.
                cacheRoot <- file.path(tempdir(), ee$properties$id)
            } else {
                ## Last resort: a shared temp dir.
                cacheRoot <- tempdir()
            }

            ## TODO: remove this block after fixing setCacheRoot
            if (!file.exists(cacheRoot))
                dir.create(cacheRoot, recursive = TRUE)
            cacheRoot <- gsub("[\\/]+", "/", normalizePath(cacheRoot))

            ## Reuse an existing file cache for this root when one is
            ## already registered; otherwise establish it.
            if (cacheRoot %in% synapseClient:::availFileCaches()) {
                ee@archOwn@fileCache <- getFileCache(cacheRoot)
            } else {
                ## TODO: fix this
                setCacheRoot(ee@archOwn, cacheRoot, clean = TRUE)
            }
        }
        ee
    }
)
# initialzeEntity methods
# Post-construction initialization for entity objects, layered by class:
# each method delegates to its parent class's method and then performs its
# own setup. NOTE(review): the generic name is misspelled ("initialzeEntity");
# it is kept as-is because dispatch relies on this exact name.
setMethod(
f = "initialzeEntity",
signature = "SynapseEntity",
definition = function(entity){
# base case: plain entities need no extra initialization
entity
}
)
setMethod(
f = "initialzeEntity",
signature = "SynapseLocationOwner",
definition = function(entity){
# run the parent (SynapseEntity) initialization first
ifun <- getMethod("initialzeEntity", "SynapseEntity")
entity <- ifun(entity)
## get the cache url for this entity
url <- properties(entity)$locations[[1]][['path']]
if(is.null(url))
return(entity)
# map the location's path prefix into the local synapse cache directory
parsedUrl <- synapseClient:::.ParsedUrl(propertyValue(entity, 'locations')[[1]]['path'])
destdir <- file.path(synapseCacheDir(), gsub("^/", "", parsedUrl@pathPrefix))
if(!file.exists(destdir))
dir.create(destdir, recursive=T)
destdir <- normalizePath(path.expand(destdir))
## instantiate the file cache and put the reference in
## the archOwn slot
fc <- getFileCache(destdir)
entity@archOwn@fileCache <- fc
entity
}
)
setMethod(
f = "initialzeEntity",
signature = "SynapseLocationOwnerWithObjects",
definition = function(entity){
# run the parent (SynapseLocationOwner) initialization first
ifun <- getMethod("initialzeEntity", "SynapseLocationOwner")
entity <- ifun(entity)
## share the archOwn's file cache with the object owner so both
## views of the entity use the same on-disk cache
entity@objOwn$fileCache <- entity@archOwn@fileCache
entity
}
)
|
# Test runner for the compareheight package: executes the testthat suite
# (this file is invoked automatically by R CMD check).
library(testthat)
library(compareheight)
test_check("compareheight")
| /tests/testthat.R | no_license | riebetob/compareheight | R | false | false | 70 | r | library(testthat)
library(compareheight)
test_check("compareheight")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphModels.R
\name{mplus.traceplot}
\alias{mplus.traceplot}
\title{Plot the samples for each MCMC chain as a function of iterations}
\usage{
mplus.traceplot(mplus.model, rows = 4, cols = 4, parameters_only = TRUE)
}
\arguments{
\item{mplus.model}{An Mplus model extracted by the \code{readModels} function.}
\item{rows}{Number of rows to display per plot.}
\item{cols}{Optional. Number of columns to display per plot.}
\item{parameters_only}{Optional. If TRUE, only the unstandardized parameter estimates from the MCMC
draws will be displayed (as opposed to standardized estimates, r-square estimates, etc.).
The unstandardized estimates all begin with "Parameter" in the Mplus gh5 output.}
}
\value{
No value is returned by this function.
Called for the side effect of displaying an MCMC chains traceplot.
}
\description{
Displays a traceplot of the MCMC draws from the posterior distribution of each parameter estimate for a Bayesian Mplus model.
This function requires that 1) PLOT: TYPE=PLOT2; be included in the Mplus input file, 2) a gh5 file be present corresponding
to the Mplus output file (and containing a bayesian_data section), and 3) that the rhdf5 package be installed to allow
the gh5 file to be imported.
}
\details{
A multi-panel plot is drawn to the screen and the user is prompted to display the next plot if more than rows x columns estimates are
in the model.
}
\note{
Trace and density plots can also be obtained using the coda package and the bparameters
element of the mplus.model object. This requires that the posterior draws
be saved using SAVEDATA: BPARAMETERS syntax. See example below.
}
\examples{
\dontrun{
myModel <- readModels("BayesModel_WithGH5MCMC.out")
mplus.traceplot(myModel, rows=2, cols=3)
#alternative using the coda package
library(coda)
plot(myModel$bparameters$valid_draw)
}
}
\author{
Joseph Glass, Michael Hallquist
}
\seealso{
\code{\link{plot.mcmc}}
}
\keyword{interface}
| /man/mplus.traceplot.Rd | no_license | dougtommet/MplusAutomation | R | false | true | 2,020 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphModels.R
\name{mplus.traceplot}
\alias{mplus.traceplot}
\title{Plot the samples for each MCMC chain as a function of iterations}
\usage{
mplus.traceplot(mplus.model, rows = 4, cols = 4, parameters_only = TRUE)
}
\arguments{
\item{mplus.model}{An Mplus model extracted by the \code{readModels} function.}
\item{rows}{Number of rows to display per plot.}
\item{cols}{Optional. Number of columns to display per plot.}
\item{parameters_only}{Optional. If TRUE, only the unstandardized parameter estimates from the MCMC
draws will be displayed (as opposed to standardized estimates, r-square estimates, etc.).
The unstandardized estimates all begin with "Parameter" in the Mplus gh5 output.}
}
\value{
No value is returned by this function.
Called for the side effect of displaying an MCMC chains traceplot.
}
\description{
Displays a traceplot of the MCMC draws from the posterior distribution of each parameter estimate for a Bayesian Mplus model.
This function requires that 1) PLOT: TYPE=PLOT2; be included in the Mplus input file, 2) a gh5 file be present corresponding
to the Mplus output file (and containing a bayesian_data section), and 3) that the rhdf5 package be installed to allow
the gh5 file to be imported.
}
\details{
A multi-panel plot is drawn to the screen and the user is prompted to display the next plot if more than rows x columns estimates are
in the model.
}
\note{
Trace and density plots can also be obtained using the coda package and the bparameters
element of the mplus.model object. This requires that the posterior draws
be saved using SAVEDATA: BPARAMETERS syntax. See example below.
}
\examples{
\dontrun{
myModel <- readModels("BayesModel_WithGH5MCMC.out")
mplus.traceplot(myModel, rows=2, cols=3)
#alternative using the coda package
library(coda)
plot(myModel$bparameters$valid_draw)
}
}
\author{
Joseph Glass, Michael Hallquist
}
\seealso{
\code{\link{plot.mcmc}}
}
\keyword{interface}
|
## plot2.R -- Exploratory Data Analysis course, plot 2.
## Plots minute-level Global Active Power for two days of the UCI
## "household power consumption" data and saves the figure as plot2.png.
setwd("~/Magic Briefcase/coursera/Exploratory/proj1")
# skip/nrow jump straight to the rows of interest (2880 minutes = 2 days);
# column names are recovered separately from the file's header row.
# NOTE(review): skip = 66637 presumably lands on 2007-02-01 00:00 -- confirm.
dat <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880, sep = ";", col.names = colnames(read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")))
dat$Date <- as.character(dat$Date)
dat$Time <- as.character(dat$Time)
# combine the Date and Time fields into a single POSIXct timestamp
dat$dateTime <- as.POSIXct(strptime(paste(dat$Date,dat$Time), "%d/%m/%Y %H:%M:%S"))
plot(dat$dateTime, dat$Global_active_power, xlab = "", ylab="Global Active Power (kilowatts)", type="l")
# copy the on-screen plot to a default-size PNG, then close the device
dev.copy(png, file="plot2.png")
dev.off()
| /plot2.R | no_license | fgoeddeke/ExData_Plotting1 | R | false | false | 549 | r | setwd("~/Magic Briefcase/coursera/Exploratory/proj1")
## plot2.R (continued) -- builds the time series and writes plot2.png.
# skip/nrow jump straight to the rows of interest (2880 minutes = 2 days);
# column names are recovered separately from the file's header row.
# NOTE(review): skip = 66637 presumably lands on 2007-02-01 00:00 -- confirm.
dat <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880, sep = ";", col.names = colnames(read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")))
dat$Date <- as.character(dat$Date)
dat$Time <- as.character(dat$Time)
# combine the Date and Time fields into a single POSIXct timestamp
dat$dateTime <- as.POSIXct(strptime(paste(dat$Date,dat$Time), "%d/%m/%Y %H:%M:%S"))
plot(dat$dateTime, dat$Global_active_power, xlab = "", ylab="Global Active Power (kilowatts)", type="l")
# copy the on-screen plot to a default-size PNG, then close the device
dev.copy(png, file="plot2.png")
dev.off()
|
# cluster_abalone.R -- k-means clustering of the abalone data set:
# read, inspect, scale, choose k via the elbow (WSS) plot, cluster,
# and visualize the cluster assignment.
library(ggplot2)
library(cluster)
library(factoextra)
ab <- read.csv('abalone.csv')
#View(wine)
str(ab)
summary(ab)
any(is.na(ab))
View(head(ab))
# raw data colored by the categorical Sex column, for comparison with
# the cluster assignment plotted at the end
ggplot(ab, aes(Diameter, Shell.weight, color = Sex)) + geom_point()
# Sex is categorical, so drop it before scaling/clustering.
# NOTE(review): ab[-1] below also drops the first remaining column --
# confirm that is intended.
ab$Sex <- NULL
ab.scale <- scale(ab[-1])
View(head(ab.scale))
# elbow plot of total within-cluster sum of squares.
# NOTE(review): the marker line is at k = 4 but kmeans below uses k = 3 --
# confirm which k was intended.
fviz_nbclust(ab.scale, kmeans, method = 'wss') + geom_vline(xintercept = 4, linetype=5, col='darkred')
result <- kmeans(ab.scale, 3, nstart=10)
result
result$cluster <- as.factor(result$cluster)
# Fix: the original final ggplot() call had no geom layer, so it rendered an
# empty panel; add geom_point() so the cluster assignment is actually drawn.
ggplot(ab, aes(Diameter, Shell.weight, color = result$cluster)) + geom_point()
| /cluster_abalone.R | no_license | SaksheePhade/Case-Study-FOSSEE | R | false | false | 585 | r | # clustering
# cluster_abalone.R -- k-means clustering of the abalone data set:
# read, inspect, scale, choose k via the elbow (WSS) plot, cluster,
# and visualize the cluster assignment.
library(ggplot2)
library(cluster)
library(factoextra)
ab <- read.csv('abalone.csv')
#View(wine)
str(ab)
summary(ab)
any(is.na(ab))
View(head(ab))
# raw data colored by the categorical Sex column, for comparison with
# the cluster assignment plotted at the end
ggplot(ab, aes(Diameter, Shell.weight, color = Sex)) + geom_point()
# Sex is categorical, so drop it before scaling/clustering.
# NOTE(review): ab[-1] below also drops the first remaining column --
# confirm that is intended.
ab$Sex <- NULL
ab.scale <- scale(ab[-1])
View(head(ab.scale))
# elbow plot of total within-cluster sum of squares.
# NOTE(review): the marker line is at k = 4 but kmeans below uses k = 3 --
# confirm which k was intended.
fviz_nbclust(ab.scale, kmeans, method = 'wss') + geom_vline(xintercept = 4, linetype=5, col='darkred')
result <- kmeans(ab.scale, 3, nstart=10)
result
result$cluster <- as.factor(result$cluster)
# Fix: the original final ggplot() call had no geom layer, so it rendered an
# empty panel; add geom_point() so the cluster assignment is actually drawn.
ggplot(ab, aes(Diameter, Shell.weight, color = result$cluster)) + geom_point()
|
/plugins/MacAU/PurestEcho/PurestEcho.r | permissive | benjohnson2001/airwindows | R | false | false | 3,236 | r | ||
# tls_test.R -- benchmark point-cloud loading time vs. voxel size.
# For every test file and every voxel size, reads the cloud thinned with
# the -thin_with_voxel filter and records the point count and wall-clock
# start/end times in `props`. Requires lidR/magrittr and local test data.
require(lidR)
require(magrittr)
files = c('d:/geoslam.laz', 'd:/Projects/TLStools/test_clouds/gerdau.laz', 'd:/Roadbridge.laz', 'd:/aline/LAZ/bloco1.laz')
vx_sizes = c(0.1, 0.05, 0.025, 0.01)
# scratch values; both variables are overwritten by the loops below
f = files[2]
v = vx_sizes[2]
props = data.frame()
for(f in files){
for(v in vx_sizes){
filter = '-thin_with_voxel' %>% paste(v)
tmBeg = Sys.time()
paste(f,v,tmBeg) %>% print
# NOTE(review): the elapsed time (end - start) includes both 10 s sleeps;
# presumably they let the OS settle between I/O-heavy reads -- confirm.
Sys.sleep(10)
cloud = readLAS(f, select='XYZ', filter=filter)
Sys.sleep(10)
tmEnd = Sys.time()
temp = data.frame(file = f, voxel = v, n = nrow(cloud@data), start=tmBeg, end=tmEnd)
props %<>% rbind(temp)
# free the (potentially large) cloud before the next iteration
rm(cloud)
gc(T,T,T)
}
}
write.csv(props, 'tls_test.csv', row.names = F) | /R/tls_test.R | no_license | tiagodc/Scripts | R | false | false | 696 | r | require(lidR)
require(magrittr)
files = c('d:/geoslam.laz', 'd:/Projects/TLStools/test_clouds/gerdau.laz', 'd:/Roadbridge.laz', 'd:/aline/LAZ/bloco1.laz')
vx_sizes = c(0.1, 0.05, 0.025, 0.01)
f = files[2]
v = vx_sizes[2]
props = data.frame()
for(f in files){
for(v in vx_sizes){
filter = '-thin_with_voxel' %>% paste(v)
tmBeg = Sys.time()
paste(f,v,tmBeg) %>% print
Sys.sleep(10)
cloud = readLAS(f, select='XYZ', filter=filter)
Sys.sleep(10)
tmEnd = Sys.time()
temp = data.frame(file = f, voxel = v, n = nrow(cloud@data), start=tmBeg, end=tmEnd)
props %<>% rbind(temp)
rm(cloud)
gc(T,T,T)
}
}
write.csv(props, 'tls_test.csv', row.names = F) |
# build_prior_utils.R
# Functions used for building and processing prior networks.
# peaks_region_2_name
# Convert dataframe defining genomic regions into character
# vector of region names
# Inputs:
# df_peak = 3 column dataframe of the form [Chr Start End]
# delimiter = delimiter for elements in character vector (default: '_')
# Output:
# name_peak = character vector of peak names
peaks_region_2_name <- function(
  df_peak,
  delimiter='_'
){
  # Join the first three columns (Chr, Start, End) into one name per row.
  region_cols <- as.list(df_peak[, 1:3])
  do.call(paste, c(region_cols, sep = delimiter))
}
# net_sparse_2_full
# Convert sparse network to full matrix format
# Network should be of the form: [Node1, Node2, Weight, ...]
# Only the first three columns will be used.
# Inputs:
# net_sp = sparse network file name or sparse file
# Outputs:
# net_full = full matrix network, rows=Node2, columns=Node1
net_sparse_2_full <- function(
net_sp
){
# Matrix provides sparseMatrix(); loaded lazily at call time
library('Matrix')
# load sparse network if necessary (argument may be a file path)
if (is.character(net_sp)){
print(paste('Load sparse network:',net_sp))
net_sp <- read.delim(net_sp, header=TRUE, sep='\t')
}
# only the first three columns (Node1, Node2, Weight) are used
net_sp <- net_sp[,1:3]
colnames(net_sp) <- c('Node1', 'Node2', 'Weight')
# unique TFs & Targets
uniq_node1 <- unique(net_sp$Node1)
uniq_node2 <- unique(net_sp$Node2)
n_uniq_node1 <- length(uniq_node1)
n_uniq_node2 <- length(uniq_node2)
# assign unique nodes to indices (lookup tables keyed by node name)
df_node1 <- data.frame(idx=1:n_uniq_node1, row.names=uniq_node1)
df_node2 <- data.frame(idx=1:n_uniq_node2, row.names=uniq_node2)
# create sparse network using indices
idx_node1 <- df_node1[as.character(net_sp$Node1),1]
idx_node2 <- df_node2[as.character(net_sp$Node2),1]
net_sp_idx <- data.frame(Node1=idx_node1, Node2=idx_node2)
net_sp_idx <- cbind(net_sp_idx, net_sp[,3])
colnames(net_sp_idx) <- c('Node1', 'Node2', 'Weight')
# full network: rows = Node2 (targets), columns = Node1 (TFs).
# NOTE: sparseMatrix() combines duplicated (Node1, Node2) pairs by summing
# their weights, so repeated edges in the input are merged additively.
net_full <- sparseMatrix(i=net_sp_idx$Node2, j=net_sp_idx$Node1, x=net_sp_idx$Weight)
net_full <- as.matrix(net_full)
rownames(net_full) <- as.character(uniq_node2)
colnames(net_full) <- as.character(uniq_node1)
return(net_full)
}
# net_full_2_sparse
# Convert network in full matrix format to sparse format
# Sparse network will have the form [TF, Target, Weight]
# where TF and Target are the columns and rows of the full
# matrix, respectively, and Weight are the non-zero matrix entries.
# Input:
# net_full <- full network matrix, tab delimited, rows=Targets, cols=TFs
# Output:
# net_sp <- sparse network, 3 column format: [TF, Target, Weight]
net_full_2_sparse <- function(
  net_full
){
  # Flatten the matrix: as.table() yields one row per (Target, TF) cell,
  # with the cell value in the third column.
  flat <- as.data.frame(as.table(as.matrix(net_full)))
  net_sp <- data.frame(TF = flat[, 2], Target = flat[, 1], Weight = flat[, 3])
  # Drop zero-weight entries; NA weights, if any, are kept (matches which()).
  zero_rows <- which(net_sp$Weight == 0)
  if (length(zero_rows) > 0) {
    net_sp <- net_sp[-zero_rows, ]
  }
  net_sp
}
# net_quant_2_binary
# Convert sparse network to binary network
# All interaction weights will be set to 1
# Network should be of the form: [TF, Target, Weight, ...]
# Only the first three columns will be used.
# Inputs:
# net_sp = sparse network, 3 column format: [TF, Target, Weight]
# Outputs:
# net_sp_b = sparse network, all weights equal to 1
net_quant_2_binary <- function(
  net_sp
){
  # Read the network from disk when a file path is supplied.
  if (is.character(net_sp)) {
    print(paste('Load sparse network:', net_sp))
    net_sp <- read.delim(net_sp, sep = '\t', header = TRUE)
  }
  # Keep the first two columns (TF, Target) and force every weight to 1.
  data.frame(TF = net_sp[[1]], Target = net_sp[[2]], Weight = 1)
}
# net_sum
# Sum of two full networks
# Dimensions do not have to be the same
# Inputs:
# net1 = network 1, full matrix format
# net2 = network 2, full matrix format
# Outputs:
# net_out = sum of input networks
net_sum <- function(
  net1,
  net2
){
  # Union of row/column names, sorted, defines the output dimensions.
  all_rows <- sort(unique(c(rownames(net1), rownames(net2))))
  all_cols <- sort(unique(c(colnames(net1), colnames(net2))))
  # Embed a network into the full (all_rows x all_cols) zero matrix.
  expand <- function(net) {
    full <- matrix(0, length(all_rows), length(all_cols),
                   dimnames = list(all_rows, all_cols))
    full[rownames(net), colnames(net)] <- net
    full
  }
  expand(net1) + expand(net2)
}
# prior_proximal
# Build prior based on motif occurrences proximal to a region
# surrounding genomic features.
# Requires the 'bedtools' and 'awk' executables on the PATH; a temporary
# working directory is created under the current directory and removed
# before returning.
# Inputs:
# bed_motif = bed file/dataframe of motif locations, 4-column format: [Chr, Start, End, Motif]
# bed_feature = bed file/dataframe of genomic features to map TFs to: 4-column format: [Chr, Start, End, Gene]
# tf_motif = file/dataframe mapping motifs to TF names, 2-column format: [Motif, TF]
# window_feature = window in base pairs around bed features to map to TFs (default: 10000, i.e. 10 kb)
# bed_active = bed file/dataframe of active genomic regions (eg, active histone markers) (default: NULL)
# Output:
# prior = 3-column prior network [TF Target Weight]; Weight counts the
# motif occurrences for that TF within the window around the gene
prior_proximal <- function(
bed_motif,
bed_feature,
tf_motif,
window_feature=10000,
bed_active=NULL
){
# load motif bed file
if (is.character(bed_motif)){
print(paste('Load motif bed file:',bed_motif))
bed_motif <- read.delim(bed_motif, header=FALSE, sep='\t')
}
bed_motif <- bed_motif[,1:4]
colnames(bed_motif) <- c('Chr', 'Start', 'End', 'Motif')
# load feature bed file
if (is.character(bed_feature)){
print(paste('Load feature bed file:',bed_feature))
bed_feature <- read.delim(bed_feature, header=FALSE, sep='\t')
}
bed_feature <- bed_feature[,1:4]
# load TF to motif map if necessary
if (is.character(tf_motif)){
print(paste('Load TF to motif info:',tf_motif))
tf_motif <- read.delim(tf_motif, header=FALSE, sep='\t')
}
tf_motif <- tf_motif[,1:2]
colnames(tf_motif) <- c('Motif','TF')
tf_motif <- data.frame(TF=tf_motif$TF, Motif=tf_motif$Motif)
# prune bed features if necessary: keep only features overlapping an
# active region
if (!is.null(bed_active)){
# load active bed
if (is.character(bed_active)){
print(paste('Load active bed file:',bed_active))
bed_active <- read.delim(bed_active, header=FALSE, sep='\t')
}
print(paste('Prune feature bed using active bed'))
bed_feature <- prune_bed_feature(bed_feature=bed_feature, bed_keep_these=bed_active)
}
colnames(bed_feature) <- c('Chr', 'Start', 'End', 'Gene')
# create temporary working directory (under the current working directory)
dir_wd <- basename(tempfile(pattern='tmp_dir_'))
dir.create(dir_wd, showWarnings=TRUE)
# create motif bed file: [Chr, Start, End, TF] via the motif->TF map
print('Create motif bed file')
write.table(merge(bed_motif, tf_motif, by='Motif')[,2:5], file.path(dir_wd,'tmp_bed_motif.bed'),
quote=FALSE, row.names=FALSE, col.names=FALSE, sep='\t')
# sort motif bed file
system(paste0('bedtools sort -i ', file.path(dir_wd,'tmp_bed_motif.bed'),' > ',
file.path(dir_wd,'tmp_bed_motif_sorted.bed')))
# save bed features to working
write.table(bed_feature, file.path(dir_wd,'tmp_bed_feature.bed'), quote=FALSE, sep='\t',
row.names=FALSE, col.names=FALSE)
# sort bed features file
system(paste0('bedtools sort -i ', file.path(dir_wd,'tmp_bed_feature.bed'),' > ',
file.path(dir_wd,'tmp_bed_feature_sorted.bed')))
# intersection of gene features and motifs.
# bedtools window output columns: 1-4 = feature bed (Chr,Start,End,Gene),
# 5-8 = motif bed (Chr,Start,End,TF); awk keeps $8 (TF) and $4 (Gene).
print('Window motifs with gene features')
cmd_window <- paste0("bedtools window -w ",window_feature," -a ",
dir_wd,"/tmp_bed_feature_sorted.bed -b ",
dir_wd,"/tmp_bed_motif_sorted.bed | awk -v OFS='\t' '{print $8,$4}' > ",
dir_wd,"/tmp_window_interactions.bed")
system(cmd_window)
# load interactions (one row per motif hit near a gene)
df_int <- read.delim(file.path(dir_wd,'tmp_window_interactions.bed'), header=FALSE, sep='\t')
colnames(df_int) <- c('TF','Target')
# delete temporary working directory
system(paste0('rm -r ',dir_wd))
# count number of motifs in vicinity of each gene:
# table() over "TF_Gene" strings yields one count per unique interaction
print('Count TF-gene interactions')
name_int <- paste(df_int[,1], df_int[,2], sep='_')
idx_unique <- which(!duplicated(name_int))
unique_int <- df_int[idx_unique,]
name_unique_int <- paste(unique_int[,1], unique_int[,2], sep='_')
df_name_int <- as.data.frame(table(name_int))
df_name_int <- data.frame(Freq=df_name_int$Freq, row.names=df_name_int$name_int)
# build prior, sorted by the "TF_Target" name with rownames reset to 1..n
print('Build prior')
prior <- data.frame(TF=unique_int$TF, Target=unique_int$Target, Weight=df_name_int[name_unique_int,1])
name_prior <- paste(prior$TF, prior$Target, sep='_')
sort_prior <- sort(name_prior, index.return=TRUE)
prior <- prior[sort_prior$ix,]
rownames(prior) <- 1:dim(prior)[1]
return(prior)
}
# prune_bed_feature
# Returns features specified by a bed file that overlap another bed file.
# Inputs:
# bed_feature = bed file of genomic features to prune
# bed_keep_these = keep the feature in bed_feature that overlap with these
# keep_unique = keep unique pruned bed file features (default: TRUE)
# Outputs
# bed_pruned = bed file of genomic features overlapped desired region
prune_bed_feature <- function(
bed_feature,
bed_keep_these,
keep_unique=TRUE
){
# create temporary working directory (under the current working directory;
# requires the bedtools executable on the PATH for the calls below)
dir_wd <- basename(tempfile(pattern='tmp_dir_'))
dir.create(dir_wd, showWarnings=TRUE)
# load bed files (arguments may be file paths or data frames)
if (is.character(bed_feature)){
bed_feature <- read.delim(bed_feature, header=FALSE, sep='\t')
}
if (is.character(bed_keep_these)){
bed_keep_these <- read.delim(bed_keep_these, header=FALSE, sep='\t')
}
# number of input bed columns (used to slice the window output below)
n_col_bed_feature <- dim(bed_feature)[2]
n_col_bed_keep_these <- dim(bed_keep_these)[2]
# write bed files to temporary working directory
write.table(bed_feature, file.path(dir_wd, 'bed_feature.bed'), quote=FALSE,
sep='\t', col.names=FALSE, row.names=FALSE)
write.table(bed_keep_these, file.path(dir_wd, 'bed_keep_these.bed'), quote=FALSE,
sep='\t', col.names=FALSE, row.names=FALSE)
# sort bed files
system(paste0('bedtools sort -i ',file.path(dir_wd, 'bed_feature.bed'),' > ',
file.path(dir_wd, 'bed_feature_sorted.bed')))
system(paste0('bedtools sort -i ',file.path(dir_wd, 'bed_keep_these.bed'),' > ',
file.path(dir_wd, 'bed_keep_these_sorted.bed')))
# prune bed features: -w 0 keeps only directly overlapping features.
# NOTE(review): -b uses the unsorted feature file even though a sorted
# copy was just produced -- presumably harmless; confirm.
cmd_window <- paste0('bedtools window -w 0 -a ', file.path(dir_wd,'bed_keep_these_sorted.bed'), ' -b ',
file.path(dir_wd, 'bed_feature.bed'),' > ', file.path(dir_wd,'bed_window.bed'))
system(cmd_window)
# an empty output file means no overlaps were found
n_line_window <- length(readLines(file.path(dir_wd,'bed_window.bed')))
if (n_line_window > 0){
bed_pruned <- read.delim(file.path(dir_wd,'bed_window.bed'), header=FALSE, sep='\t')
# window output = keep_these columns followed by feature columns;
# keep only the feature columns
bed_pruned <- bed_pruned[,(n_col_bed_keep_these+1):(n_col_bed_keep_these+n_col_bed_feature)]
# keep unique features (uniqueness over all columns concatenated)
if (keep_unique){
name_feat <- NULL
for (ix in 1:n_col_bed_feature){
name_feat <- paste(name_feat, bed_pruned[,ix], sep='_')
}
idx_keep <- which(!duplicated(name_feat))
bed_pruned <- bed_pruned[idx_keep,]
}
} else {
bed_pruned <- NULL
}
# remove temporary working directory
system(paste0('rm -r ',dir_wd))
return(bed_pruned)
}
# save_data_matrix
# Save gene expression matrix.
# txt file format, tab-delimited, rows=genes, cols=cells
# Inputs:
# counts_mat = gene expression matrix
# filename = output file name
# out_digits = maximum number of significant digits to keep (default: 5)
save_data_matrix <- function(
  counts_mat,
  filename,
  out_digits=5
){
  # Round to out_digits significant figures, then write tab-delimited with
  # row names and a blank leading header cell (col.names = NA).
  rounded <- signif(counts_mat, digits = out_digits)
  write.table(rounded, filename, quote = FALSE, row.names = TRUE,
              col.names = NA, sep = '\t')
}
# load_data_matrix
# Load gene expression matrix.
# txt file format, tab-delimited, rows=genes, cols=cells
# header are unique cell names, first column is gene names
# Inputs:
# filename = filename string
# replace_na = replace NAs with zeros (default: FALSE)
# Output:
# counts_mat = gene expression matrix
load_data_matrix <- function(
  filename,
  replace_na=FALSE
){
  # First column = gene names (row names); header = cell names, kept
  # verbatim (check.names = FALSE).
  mat_df <- read.delim(filename, header = TRUE, row.names = 1, check.names = FALSE)
  if (replace_na) {
    mat_df[is.na(mat_df)] <- 0
  }
  as.matrix(mat_df)
}
# net_prior_merge
# Create merged prior.
# Inputs:
# net_full = full network, format: [Targets X TFs]
# Output:
# net_out = list containing full network and degenerate TF info
net_prior_merge <- function(
  net_full
){
  # Merge TF columns whose target profiles are identical into single
  # degenerate columns named "tf1_tf2_...". Returns a list with:
  #   Network   -- merged matrix (one column per unique target profile)
  #   MergedTFs -- data.frame describing merged groups (absent if none)
  # Load the network from disk when a file path is supplied.
  if (is.character(net_full)) {
    print(paste('Load network:', net_full))
    net_full <- load_data_matrix(net_full)
  }
  n_tf <- ncol(net_full)
  # One signature string per TF column; identical target profiles give
  # identical signatures. vapply replaces the original grow-by-c() loop.
  str_tf <- vapply(seq_len(n_tf),
                   function(j) paste(net_full[, j], collapse = '|'),
                   character(1))
  uniq_str_tf <- unique(str_tf)
  n_uniq <- length(uniq_str_tf)
  net_out <- list()
  if (n_uniq == 0) {
    # no TF columns: nothing to merge (the original 1:n loop errored here)
    net_out[['Network']] <- net_full
    net_out[['MergedTFs']] <- NULL
    return(net_out)
  }
  # Preallocate instead of growing net_merged with cbind() per column.
  merged_cols <- vector("list", n_uniq)
  all_name <- character(n_uniq)
  merged_tfs <- NULL
  for (ix in seq_len(n_uniq)) {
    # all TF columns sharing this target profile
    idx_tf <- which(str_tf == uniq_str_tf[ix])
    curr_name <- paste(colnames(net_full)[idx_tf], collapse = '_')
    all_name[ix] <- curr_name
    # keep one representative column per profile
    merged_cols[[ix]] <- net_full[, idx_tf[1]]
    if (length(idx_tf) > 1) {
      curr_merged <- data.frame(name_merged = curr_name,
                                tfs_merged = paste(colnames(net_full)[idx_tf], collapse = ', '))
      merged_tfs <- rbind(merged_tfs, curr_merged)
    }
  }
  net_merged <- do.call(cbind, merged_cols)
  colnames(net_merged) <- all_name
  rownames(net_merged) <- rownames(net_full)
  net_out[['Network']] <- net_merged
  net_out[['MergedTFs']] <- merged_tfs
  return(net_out)
}
| /priorConstruction/utils_prior.R | no_license | emiraldi/infTRN_lassoStARS | R | false | false | 14,476 | r | # build_prior_utils.R
# Functions used for building and processing prior networks.
# peaks_region_2_name
# Convert dataframe defining genomic regions into character
# vector of region names
# Inputs:
# df_peak = 3 column dataframe of the form [Chr Start End]
# delimiter = delimiter for elements in character vector (default: '_')
# Output:
# name_peak = character vector of peak names
peaks_region_2_name <- function(
  df_peak,
  delimiter='_'
){
  # Join the first three columns (Chr, Start, End) into one name per row.
  region_cols <- as.list(df_peak[, 1:3])
  do.call(paste, c(region_cols, sep = delimiter))
}
# net_sparse_2_full
# Convert sparse network to full matrix format
# Network should be of the form: [Node1, Node2, Weight, ...]
# Only the first three columns will be used.
# Inputs:
# net_sp = sparse network file name or sparse file
# Outputs:
# net_full = full matrix network, rows=Node2, columns=Node1
net_sparse_2_full <- function(
net_sp
){
# Matrix provides sparseMatrix(); loaded lazily at call time
library('Matrix')
# load sparse network if necessary (argument may be a file path)
if (is.character(net_sp)){
print(paste('Load sparse network:',net_sp))
net_sp <- read.delim(net_sp, header=TRUE, sep='\t')
}
# only the first three columns (Node1, Node2, Weight) are used
net_sp <- net_sp[,1:3]
colnames(net_sp) <- c('Node1', 'Node2', 'Weight')
# unique TFs & Targets
uniq_node1 <- unique(net_sp$Node1)
uniq_node2 <- unique(net_sp$Node2)
n_uniq_node1 <- length(uniq_node1)
n_uniq_node2 <- length(uniq_node2)
# assign unique nodes to indices (lookup tables keyed by node name)
df_node1 <- data.frame(idx=1:n_uniq_node1, row.names=uniq_node1)
df_node2 <- data.frame(idx=1:n_uniq_node2, row.names=uniq_node2)
# create sparse network using indices
idx_node1 <- df_node1[as.character(net_sp$Node1),1]
idx_node2 <- df_node2[as.character(net_sp$Node2),1]
net_sp_idx <- data.frame(Node1=idx_node1, Node2=idx_node2)
net_sp_idx <- cbind(net_sp_idx, net_sp[,3])
colnames(net_sp_idx) <- c('Node1', 'Node2', 'Weight')
# full network: rows = Node2 (targets), columns = Node1 (TFs).
# NOTE: sparseMatrix() combines duplicated (Node1, Node2) pairs by summing
# their weights, so repeated edges in the input are merged additively.
net_full <- sparseMatrix(i=net_sp_idx$Node2, j=net_sp_idx$Node1, x=net_sp_idx$Weight)
net_full <- as.matrix(net_full)
rownames(net_full) <- as.character(uniq_node2)
colnames(net_full) <- as.character(uniq_node1)
return(net_full)
}
# net_full_2_sparse
# Convert network in full matrix format to sparse format
# Sparse network will have the form [TF, Target, Weight]
# where TF and Target are the columns and rows of the full
# matrix, respectively, and Weight are the non-zero matrix entries.
# Input:
# net_full <- full network matrix, tab delimited, rows=Targets, cols=TFs
# Output:
# net_sp <- sparse network, 3 column format: [TF, Target, Weight]
net_full_2_sparse <- function(
  net_full
){
  # Flatten the matrix: as.table() yields one row per (Target, TF) cell,
  # with the cell value in the third column.
  flat <- as.data.frame(as.table(as.matrix(net_full)))
  net_sp <- data.frame(TF = flat[, 2], Target = flat[, 1], Weight = flat[, 3])
  # Drop zero-weight entries; NA weights, if any, are kept (matches which()).
  zero_rows <- which(net_sp$Weight == 0)
  if (length(zero_rows) > 0) {
    net_sp <- net_sp[-zero_rows, ]
  }
  net_sp
}
# net_quant_2_binary
# Convert sparse network to binary network
# All interaction weights will be set to 1
# Network should be of the form: [TF, Target, Weight, ...]
# Only the first three columns will be used.
# Inputs:
# net_sp = sparse network, 3 column format: [TF, Target, Weight]
# Outputs:
# net_sp_b = sparse network, all weights equal to 1
net_quant_2_binary <- function(
  net_sp
){
  # Read the network from disk when a file path is supplied.
  if (is.character(net_sp)) {
    print(paste('Load sparse network:', net_sp))
    net_sp <- read.delim(net_sp, sep = '\t', header = TRUE)
  }
  # Keep the first two columns (TF, Target) and force every weight to 1.
  data.frame(TF = net_sp[[1]], Target = net_sp[[2]], Weight = 1)
}
# net_sum
# Sum of two full networks
# Dimensions do not have to be the same
# Inputs:
# net1 = network 1, full matrix format
# net2 = network 2, full matrix format
# Outputs:
# net_out = sum of input networks
net_sum <- function(
  net1,
  net2
){
  # Union of row/column names, sorted, defines the output dimensions.
  all_rows <- sort(unique(c(rownames(net1), rownames(net2))))
  all_cols <- sort(unique(c(colnames(net1), colnames(net2))))
  # Embed a network into the full (all_rows x all_cols) zero matrix.
  expand <- function(net) {
    full <- matrix(0, length(all_rows), length(all_cols),
                   dimnames = list(all_rows, all_cols))
    full[rownames(net), colnames(net)] <- net
    full
  }
  expand(net1) + expand(net2)
}
# prior_proximal
# Build prior based on motif occurrences proximal to a region
# surrounding genomic features.
# Requires the 'bedtools' and 'awk' executables on the PATH; a temporary
# working directory is created under the current directory and removed
# before returning.
# Inputs:
# bed_motif = bed file/dataframe of motif locations, 4-column format: [Chr, Start, End, Motif]
# bed_feature = bed file/dataframe of genomic features to map TFs to: 4-column format: [Chr, Start, End, Gene]
# tf_motif = file/dataframe mapping motifs to TF names, 2-column format: [Motif, TF]
# window_feature = window in base pairs around bed features to map to TFs (default: 10000, i.e. 10 kb)
# bed_active = bed file/dataframe of active genomic regions (eg, active histone markers) (default: NULL)
# Output:
# prior = 3-column prior network [TF Target Weight]; Weight counts the
# motif occurrences for that TF within the window around the gene
prior_proximal <- function(
bed_motif,
bed_feature,
tf_motif,
window_feature=10000,
bed_active=NULL
){
# load motif bed file
if (is.character(bed_motif)){
print(paste('Load motif bed file:',bed_motif))
bed_motif <- read.delim(bed_motif, header=FALSE, sep='\t')
}
bed_motif <- bed_motif[,1:4]
colnames(bed_motif) <- c('Chr', 'Start', 'End', 'Motif')
# load feature bed file
if (is.character(bed_feature)){
print(paste('Load feature bed file:',bed_feature))
bed_feature <- read.delim(bed_feature, header=FALSE, sep='\t')
}
bed_feature <- bed_feature[,1:4]
# load TF to motif map if necessary
if (is.character(tf_motif)){
print(paste('Load TF to motif info:',tf_motif))
tf_motif <- read.delim(tf_motif, header=FALSE, sep='\t')
}
tf_motif <- tf_motif[,1:2]
colnames(tf_motif) <- c('Motif','TF')
tf_motif <- data.frame(TF=tf_motif$TF, Motif=tf_motif$Motif)
# prune bed features if necessary: keep only features overlapping an
# active region
if (!is.null(bed_active)){
# load active bed
if (is.character(bed_active)){
print(paste('Load active bed file:',bed_active))
bed_active <- read.delim(bed_active, header=FALSE, sep='\t')
}
print(paste('Prune feature bed using active bed'))
bed_feature <- prune_bed_feature(bed_feature=bed_feature, bed_keep_these=bed_active)
}
colnames(bed_feature) <- c('Chr', 'Start', 'End', 'Gene')
# create temporary working directory (under the current working directory)
dir_wd <- basename(tempfile(pattern='tmp_dir_'))
dir.create(dir_wd, showWarnings=TRUE)
# create motif bed file: [Chr, Start, End, TF] via the motif->TF map
print('Create motif bed file')
write.table(merge(bed_motif, tf_motif, by='Motif')[,2:5], file.path(dir_wd,'tmp_bed_motif.bed'),
quote=FALSE, row.names=FALSE, col.names=FALSE, sep='\t')
# sort motif bed file
system(paste0('bedtools sort -i ', file.path(dir_wd,'tmp_bed_motif.bed'),' > ',
file.path(dir_wd,'tmp_bed_motif_sorted.bed')))
# save bed features to working
write.table(bed_feature, file.path(dir_wd,'tmp_bed_feature.bed'), quote=FALSE, sep='\t',
row.names=FALSE, col.names=FALSE)
# sort bed features file
system(paste0('bedtools sort -i ', file.path(dir_wd,'tmp_bed_feature.bed'),' > ',
file.path(dir_wd,'tmp_bed_feature_sorted.bed')))
# intersection of gene features and motifs.
# bedtools window output columns: 1-4 = feature bed (Chr,Start,End,Gene),
# 5-8 = motif bed (Chr,Start,End,TF); awk keeps $8 (TF) and $4 (Gene).
print('Window motifs with gene features')
cmd_window <- paste0("bedtools window -w ",window_feature," -a ",
dir_wd,"/tmp_bed_feature_sorted.bed -b ",
dir_wd,"/tmp_bed_motif_sorted.bed | awk -v OFS='\t' '{print $8,$4}' > ",
dir_wd,"/tmp_window_interactions.bed")
system(cmd_window)
# load interactions (one row per motif hit near a gene)
df_int <- read.delim(file.path(dir_wd,'tmp_window_interactions.bed'), header=FALSE, sep='\t')
colnames(df_int) <- c('TF','Target')
# delete temporary working directory
system(paste0('rm -r ',dir_wd))
# count number of motifs in vicinity of each gene:
# table() over "TF_Gene" strings yields one count per unique interaction
print('Count TF-gene interactions')
name_int <- paste(df_int[,1], df_int[,2], sep='_')
idx_unique <- which(!duplicated(name_int))
unique_int <- df_int[idx_unique,]
name_unique_int <- paste(unique_int[,1], unique_int[,2], sep='_')
df_name_int <- as.data.frame(table(name_int))
df_name_int <- data.frame(Freq=df_name_int$Freq, row.names=df_name_int$name_int)
# build prior, sorted by the "TF_Target" name with rownames reset to 1..n
print('Build prior')
prior <- data.frame(TF=unique_int$TF, Target=unique_int$Target, Weight=df_name_int[name_unique_int,1])
name_prior <- paste(prior$TF, prior$Target, sep='_')
sort_prior <- sort(name_prior, index.return=TRUE)
prior <- prior[sort_prior$ix,]
rownames(prior) <- 1:dim(prior)[1]
return(prior)
}
# prune_bed_feature
# Returns features specified by a bed file that overlap another bed file.
# Both inputs may be given either as a path to a tab-delimited, header-less
# BED file or as an already-loaded data frame in BED column order.
# Requires the 'bedtools' executable to be available on the PATH.
# Inputs:
#   bed_feature = bed file (path or data frame) of genomic features to prune
#   bed_keep_these = keep the features in bed_feature that overlap with these
#   keep_unique = keep unique pruned bed file features (default: TRUE)
# Outputs
#   bed_pruned = bed file of genomic features overlapping the desired regions,
#                or NULL when there is no overlap at all
prune_bed_feature <- function(
bed_feature,
bed_keep_these,
keep_unique=TRUE
){
  # create temporary working directory; register cleanup immediately so the
  # directory is removed even if bedtools or any file operation below fails.
  # unlink() is portable, unlike shelling out to 'rm -r'.
  dir_wd <- basename(tempfile(pattern='tmp_dir_'))
  dir.create(dir_wd, showWarnings=TRUE)
  on.exit(unlink(dir_wd, recursive=TRUE), add=TRUE)
  # load bed files when paths were supplied
  if (is.character(bed_feature)){
    bed_feature <- read.delim(bed_feature, header=FALSE, sep='\t')
  }
  if (is.character(bed_keep_these)){
    bed_keep_these <- read.delim(bed_keep_these, header=FALSE, sep='\t')
  }
  # number of input bed columns (needed to split the window output back up)
  n_col_bed_feature <- dim(bed_feature)[2]
  n_col_bed_keep_these <- dim(bed_keep_these)[2]
  # write bed files to temporary working directory
  write.table(bed_feature, file.path(dir_wd, 'bed_feature.bed'), quote=FALSE,
              sep='\t', col.names=FALSE, row.names=FALSE)
  write.table(bed_keep_these, file.path(dir_wd, 'bed_keep_these.bed'), quote=FALSE,
              sep='\t', col.names=FALSE, row.names=FALSE)
  # sort bed files
  system(paste0('bedtools sort -i ',file.path(dir_wd, 'bed_feature.bed'),' > ',
                file.path(dir_wd, 'bed_feature_sorted.bed')))
  system(paste0('bedtools sort -i ',file.path(dir_wd, 'bed_keep_these.bed'),' > ',
                file.path(dir_wd, 'bed_keep_these_sorted.bed')))
  # prune bed features: -w 0 keeps only direct overlaps
  # NOTE(review): -b uses the unsorted feature file even though a sorted copy
  # is produced above; kept as-is to preserve the original output order
  cmd_window <- paste0('bedtools window -w 0 -a ', file.path(dir_wd,'bed_keep_these_sorted.bed'), ' -b ',
                       file.path(dir_wd, 'bed_feature.bed'),' > ', file.path(dir_wd,'bed_window.bed'))
  system(cmd_window)
  n_line_window <- length(readLines(file.path(dir_wd,'bed_window.bed')))
  if (n_line_window > 0){
    bed_pruned <- read.delim(file.path(dir_wd,'bed_window.bed'), header=FALSE, sep='\t')
    # bedtools window reports the -a columns first, then the -b (feature)
    # columns; keep only the feature columns
    bed_pruned <- bed_pruned[,(n_col_bed_keep_these+1):(n_col_bed_keep_these+n_col_bed_feature)]
    # keep unique features (dedup on the concatenation of all feature columns)
    if (keep_unique){
      name_feat <- NULL
      for (ix in seq_len(n_col_bed_feature)){
        name_feat <- paste(name_feat, bed_pruned[,ix], sep='_')
      }
      idx_keep <- which(!duplicated(name_feat))
      bed_pruned <- bed_pruned[idx_keep,]
    }
  } else {
    # no overlaps at all
    bed_pruned <- NULL
  }
  # temporary directory is removed by the on.exit() handler registered above
  return(bed_pruned)
}
# save_data_matrix
# Write a gene expression matrix to disk as a tab-delimited text file
# (rows = genes, cols = cells). Values are rounded to at most `out_digits`
# significant digits before writing; col.names = NA emits a leading empty
# header field so the file round-trips through load_data_matrix().
# Inputs:
#   counts_mat = gene expression matrix
#   filename = output file name
#   out_digits = maximum number of significant digits to keep (default: 5)
save_data_matrix <- function(
counts_mat,
filename,
out_digits=5
){
  rounded_mat <- signif(counts_mat, digits = out_digits)
  utils::write.table(
    rounded_mat,
    file = filename,
    sep = '\t',
    quote = FALSE,
    row.names = TRUE,
    col.names = NA
  )
}
# load_data_matrix
# Read a gene expression matrix from a tab-delimited text file
# (rows = genes, cols = cells; header = unique cell names, first column =
# gene names), i.e. the format written by save_data_matrix().
# Inputs:
#   filename = filename string
#   replace_na = replace NAs with zeros (default: FALSE)
# Output:
#   counts_mat = gene expression matrix
load_data_matrix <- function(
filename,
replace_na=FALSE
){
  loaded <- utils::read.delim(filename, header = TRUE, row.names = 1, check.names = FALSE)
  counts_mat <- as.matrix(loaded)
  if (replace_na) {
    counts_mat[is.na(counts_mat)] <- 0
  }
  counts_mat
}
# net_prior_merge
# Create merged prior: TFs whose target profiles are identical across all
# targets are collapsed into a single column (named by joining the TF names
# with '_'), since such TFs are indistinguishable in the prior.
# Inputs:
#   net_full = full network, format: [Targets X TFs]; either a matrix or a
#              file name understood by load_data_matrix()
# Output:
#   net_out = list containing:
#     Network   = merged network [Targets X merged TF groups]
#     MergedTFs = data frame describing each degenerate TF group
#                 (NULL when no TFs were merged)
net_prior_merge <- function(
net_full
){
  # load network from disk when a file name is given
  if (is.character(net_full)){
    print(paste('Load network:',net_full))
    net_full <- load_data_matrix(net_full)
  }
  # one signature string per TF: its full target profile
  # (vapply + seq_len instead of growing a vector in a loop; also safe
  # when the network has zero columns, where 1:n_tf would error)
  n_tf <- dim(net_full)[2]
  str_tf <- vapply(seq_len(n_tf),
                   function(ix) paste(net_full[,ix], collapse='|'),
                   character(1))
  # group TFs with the same target profile; the first TF of each group
  # supplies the merged column (all members are identical by construction)
  uniq_str_tf <- unique(str_tf)
  grp_idx <- lapply(uniq_str_tf, function(s) which(str_tf == s))
  first_idx <- vapply(grp_idx, function(ii) ii[1], integer(1))
  all_name <- vapply(grp_idx,
                     function(ii) paste(colnames(net_full)[ii], collapse='_'),
                     character(1))
  # single indexed subset instead of cbind() inside a loop
  net_merged <- as.matrix(net_full)[, first_idx, drop=FALSE]
  colnames(net_merged) <- all_name
  rownames(net_merged) <- rownames(net_full)
  # record the degenerate groups (more than one TF with the same profile)
  merged_tfs <- NULL
  for (g in seq_along(grp_idx)){
    ii <- grp_idx[[g]]
    if (length(ii) > 1){
      curr_merged <- data.frame(name_merged=all_name[g],
                                tfs_merged=paste(colnames(net_full)[ii], collapse=', '))
      merged_tfs <- rbind(merged_tfs, curr_merged)
    }
  }
  net_out <- list()
  net_out[['Network']] <- net_merged
  net_out[['MergedTFs']] <- merged_tfs
  return(net_out)
}
|
# ui.R
# UI for the statistical power calculator: a sidebar of inputs (t-test
# type, sample statistics, significance level) and a main panel showing
# the computed summary text. The submitButton delays reactivity until the
# user clicks Submit.
shinyUI(fluidPage(
  titlePanel("Statistical Power Calculator"),
  sidebarLayout(
    sidebarPanel(
      helpText("Enter your values"),
      selectInput("type", label = "Type of t.test",
                  choices = c("two.sample", "one.sample", "paired")),
      # `TRUE` replaces the reassignable shorthand `T`; NOTE(review): a
      # logical default for a numericInput is almost certainly a mistake --
      # a numeric starting value (e.g. 0) is probably intended; confirm
      numericInput("testValue", label = "Test Value", value = TRUE),
      numericInput("mean", label = "Sample Mean", value = TRUE),
      numericInput("SD", label = "Standard deviation of Sample", value = TRUE),
      numericInput("sampleSize", label = "Sample Size", value = TRUE),
      sliderInput("alpha", label = "Type 1 Error rate (Sig.level)",
                  min = 0.01, max = 1, value = 0.05, step = 0.01),
      submitButton("Submit")
    ),
    mainPanel(textOutput("summary"))
  )
))
sliderInput("n", "N:", min = 10, max = 1000, value = 200,
step = 10) | /Creating_Data_Products/My-App1/ui.R | no_license | karan5291/CourseraDataScience | R | false | false | 783 | r | # ui.R
shinyUI(fluidPage(
titlePanel("Statistical Power Calculator"),
sidebarLayout(
sidebarPanel(
helpText("Enter your values"),
selectInput("type", label ="Type of t.test",choices = c("two.sample", "one.sample", "paired")),
numericInput("testValue", label="Test Value",value=T),
numericInput("mean", label = "Sample Mean", value = T),
numericInput("SD", label = "Standard deviation of Sample", value = T),
numericInput("sampleSize", label ="Sample Size", value = T),
sliderInput("alpha", label ="Type 1 Error rate (Sig.level)", min=0.01, max=1,value=0.05,step = 0.01),
submitButton("Submit")
),
mainPanel(textOutput("summary"))
)
))
sliderInput("n", "N:", min = 10, max = 1000, value = 200,
step = 10) |
# Demo: hidden Markov models for multivariate categorical time series with
# the hmmm package. Three beverage sales series from the drinks data are
# modelled with a 2-level latent variable; a saturated model supplies
# starting values for three restricted association models.
library(hmmm)
data(drinks)
# observed series: three sales variables from the drinks data
y<-cbind(drinks$lemon.tea,drinks$orange.juice,drinks$apple.juice)
# marginal parameterisation string for the four variables
# (latent state + three observed) -- see ?marg.list for the syntax
fm<-c("l-l-l-l")
fmargobs<-marg.list(fm,mflag="m")
# saturated model (fsat<-~lat*tea*ojuice*ajuice is implicit)
# an example to calculate the starting values for the probabilities
# levels: 2-state latent variable, three 3-level observed variables
model.obsf0<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"))
# only a few EM iterations (maxit=5): this fit is just used below as a
# source of starting values
modelsat<-hidden.emfit(y,model.obsf0,y.eps=0.01,maxit=5,
maxiter=2500,norm.diff.conv=0.001,printflag=10)
print(modelsat)
#starting values used in the next models
# (Ptr/Ptobs are presumably the transition and observation probability
# estimates -- see ?hidden.emfit; passed as old.tran.p / bb below)
Ptr<-modelsat$Ptr
Ptobs<-modelsat$Ptobs
## model of constant association among tea, orange and apple juice sales given the latent states
# saturated formula minus the 4-way interaction and the 3-way association
# among the observed variables
fca<-~lat*tea*ojuice*ajuice-lat:tea:ojuice:ajuice-tea:ojuice:ajuice
model.obsfca<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=fca)
modelca<-hidden.emfit(y,model.obsfca,y.eps=0.01,maxit=3,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelca,printflag=TRUE)
## model of independence of tea sales from orange and apple juice sales given the latent states
find<-~lat*tea+lat*ojuice*ajuice
model.obsf<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=find)
modelind<-hidden.emfit(y,model.obsf,y.eps=0.01,maxit=5,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelind)
## model of total independence of tea, orange and apple juice sales given the latent states
findtot<-~lat*tea+lat*ojuice+lat*ajuice
model.obsftot<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=findtot)
modelindtot<-hidden.emfit(y,model.obsftot,y.eps=0.01,maxit=5,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelindtot)
| /demo/hiddenMarkov.R | no_license | cran/hmmm | R | false | false | 1,808 | r | library(hmmm)
data(drinks)
y<-cbind(drinks$lemon.tea,drinks$orange.juice,drinks$apple.juice)
fm<-c("l-l-l-l")
fmargobs<-marg.list(fm,mflag="m")
# saturated model (fsat<-~lat*tea*ojuice*ajuice is implicit)
# an example to calculate the starting values for the probabilities
model.obsf0<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"))
modelsat<-hidden.emfit(y,model.obsf0,y.eps=0.01,maxit=5,
maxiter=2500,norm.diff.conv=0.001,printflag=10)
print(modelsat)
#starting values used in the next models
Ptr<-modelsat$Ptr
Ptobs<-modelsat$Ptobs
## model of constant association among tea, orange and apple juice sales given the latent states
fca<-~lat*tea*ojuice*ajuice-lat:tea:ojuice:ajuice-tea:ojuice:ajuice
model.obsfca<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=fca)
modelca<-hidden.emfit(y,model.obsfca,y.eps=0.01,maxit=3,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelca,printflag=TRUE)
## model of independence of tea sales from orange and apple juice sales given the latent states
find<-~lat*tea+lat*ojuice*ajuice
model.obsf<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=find)
modelind<-hidden.emfit(y,model.obsf,y.eps=0.01,maxit=5,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelind)
## model of total independence of tea, orange and apple juice sales given the latent states
findtot<-~lat*tea+lat*ojuice+lat*ajuice
model.obsftot<-hmmm.model(marg=fmargobs,
lev=c(2,3,3,3),names=c("lat","tea","ojuice","ajuice"),formula=findtot)
modelindtot<-hidden.emfit(y,model.obsftot,y.eps=0.01,maxit=5,maxiter=2500,printflag=10,
old.tran.p=Ptr,bb=Ptobs)
print(modelindtot)
|
\name{macf.dat}
\alias{macf.dat}
\title{Female macaque data}
\description{Female macaque skull data. 7 landmarks
in 3 dimensions, 9 individuals
}
\usage{data(macf.dat)}
\format{
An array of dimension 7 x 3 x 9
}
\source{
Dryden, I.L. and Mardia, K.V. (2016). Statistical
Shape Analysis, with Applications in R (Second Edition). Wiley, Chichester. Chapter 1.
}
\references{
Data from Paul O'Higgins (Hull-York Medical School)
}
\examples{
data(macf.dat)
plotshapes(macf.dat)
}
\keyword{datasets}
| /man/macf.dat.Rd | no_license | cran/shapes | R | false | false | 498 | rd | \name{macf.dat}
\alias{macf.dat}
\title{Female macaque data}
\description{Female macaque skull data. 7 landmarks
in 3 dimensions, 9 individuals
}
\usage{data(macf.dat)}
\format{
An array of dimension 7 x 3 x 9
}
\source{
Dryden, I.L. and Mardia, K.V. (2016). Statistical
Shape Analysis, with Applications in R (Second Edition). Wiley, Chichester. Chapter 1.
}
\references{
Data from Paul O'Higgins (Hull-York Medical School)
}
\examples{
data(macf.dat)
plotshapes(macf.dat)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/residuals-methods.R
\name{predict.pense_cvfit}
\alias{predict.pense_cvfit}
\title{Predict Method for PENSE Fits}
\usage{
\method{predict}{pense_cvfit}(
object,
newdata,
alpha = NULL,
lambda = "min",
se_mult = 1,
exact = deprecated(),
correction = deprecated(),
...
)
}
\arguments{
\item{object}{PENSE with cross-validated hyper-parameters to extract coefficients from.}
\item{newdata}{an optional matrix of new predictor values.
If missing, the fitted values are computed.}
\item{alpha}{Either a single number or \code{NULL} (default).
If given, only fits with the given \code{alpha} value are considered.
If \code{lambda} is a numeric value and \code{object} was fit with multiple \emph{alpha}
values and no value is provided, the first value in \code{object$alpha} is used with a warning.}
\item{lambda}{either a string specifying which penalty level to use
(\code{"min"}, \code{"se"}, \verb{"\{m\}-se}")
or a single numeric value of the penalty parameter. See details.}
\item{se_mult}{If \code{lambda = "se"}, the multiple of standard errors to tolerate.}
\item{exact}{deprecated. Always gives a warning if \code{lambda} is not part of the
fitted sequence and coefficients are interpolated.}
\item{correction}{defunct.}
\item{...}{currently not used.}
}
\value{
a numeric vector of residuals for the given penalization level.
}
\description{
Predict response values using a PENSE (or LS-EN) regularization path with
hyper-parameters chosen by cross-validation.
}
\section{Hyper-parameters}{
If \code{lambda = "{m}-se"} and \code{object} contains fitted estimates for every penalization
level in the sequence, use the most parsimonious model with prediction performance
statistically indistinguishable from the best model.
This is determined to be the model with prediction performance within \code{m * cv_se}
from the best model.
If \code{lambda = "se"}, the multiplier \emph{m} is taken from \code{se_mult}.
By default all \emph{alpha} hyper-parameters available in the fitted object are considered.
This can be overridden by supplying one or multiple values in parameter \code{alpha}.
For example, if \code{lambda = "1-se"} and \code{alpha} contains two values, the "1-SE" rule is applied
individually for each \code{alpha} value, and the fit with the better prediction error is considered.
In case \code{lambda} is a number and \code{object} was fit for several \emph{alpha} hyper-parameters,
\code{alpha} must also be given, or the first value in \code{object$alpha} is used with a warning.
}
\examples{
# Compute the LS-EN regularization path for Freeny's revenue data
# (see ?freeny)
data(freeny)
x <- as.matrix(freeny[ , 2:5])
regpath <- elnet(x, freeny$y, alpha = 0.75)
# Predict the response using a specific penalization level
predict(regpath, newdata = freeny[1:5, 2:5],
lambda = regpath$lambda[[1]][[10]])
# Extract the residuals at a certain penalization level
residuals(regpath, lambda = regpath$lambda[[1]][[5]])
# Select penalization level via cross-validation
set.seed(123)
cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
cv_repl = 10, cv_k = 4)
# Predict the response using the "best" penalization level
predict(cv_results, newdata = freeny[1:5, 2:5])
# Extract the residuals at the "best" penalization level
residuals(cv_results)
# Extract the residuals at a more parsimonious penalization level
residuals(cv_results, lambda = "1.5-se")
}
\seealso{
Other functions for extracting components:
\code{\link{coef.pense_cvfit}()},
\code{\link{coef.pense_fit}()},
\code{\link{predict.pense_fit}()},
\code{\link{residuals.pense_cvfit}()},
\code{\link{residuals.pense_fit}()}
}
\concept{functions for extracting components}
| /man/predict.pense_cvfit.Rd | no_license | cran/pense | R | false | true | 3,791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/residuals-methods.R
\name{predict.pense_cvfit}
\alias{predict.pense_cvfit}
\title{Predict Method for PENSE Fits}
\usage{
\method{predict}{pense_cvfit}(
object,
newdata,
alpha = NULL,
lambda = "min",
se_mult = 1,
exact = deprecated(),
correction = deprecated(),
...
)
}
\arguments{
\item{object}{PENSE with cross-validated hyper-parameters to extract coefficients from.}
\item{newdata}{an optional matrix of new predictor values.
If missing, the fitted values are computed.}
\item{alpha}{Either a single number or \code{NULL} (default).
If given, only fits with the given \code{alpha} value are considered.
If \code{lambda} is a numeric value and \code{object} was fit with multiple \emph{alpha}
values and no value is provided, the first value in \code{object$alpha} is used with a warning.}
\item{lambda}{either a string specifying which penalty level to use
(\code{"min"}, \code{"se"}, \verb{"\{m\}-se}")
or a single numeric value of the penalty parameter. See details.}
\item{se_mult}{If \code{lambda = "se"}, the multiple of standard errors to tolerate.}
\item{exact}{deprecated. Always gives a warning if \code{lambda} is not part of the
fitted sequence and coefficients are interpolated.}
\item{correction}{defunct.}
\item{...}{currently not used.}
}
\value{
a numeric vector of residuals for the given penalization level.
}
\description{
Predict response values using a PENSE (or LS-EN) regularization path with
hyper-parameters chosen by cross-validation.
}
\section{Hyper-parameters}{
If \code{lambda = "{m}-se"} and \code{object} contains fitted estimates for every penalization
level in the sequence, use the fit the most parsimonious model with prediction performance
statistically indistinguishable from the best model.
This is determined to be the model with prediction performance within \code{m * cv_se}
from the best model.
If \code{lambda = "se"}, the multiplier \emph{m} is taken from \code{se_mult}.
By default all \emph{alpha} hyper-parameters available in the fitted object are considered.
This can be overridden by supplying one or multiple values in parameter \code{alpha}.
For example, if \code{lambda = "1-se"} and \code{alpha} contains two values, the "1-SE" rule is applied
individually for each \code{alpha} value, and the fit with the better prediction error is considered.
In case \code{lambda} is a number and \code{object} was fit for several \emph{alpha} hyper-parameters,
\code{alpha} must also be given, or the first value in \code{object$alpha} is used with a warning.
}
\examples{
# Compute the LS-EN regularization path for Freeny's revenue data
# (see ?freeny)
data(freeny)
x <- as.matrix(freeny[ , 2:5])
regpath <- elnet(x, freeny$y, alpha = 0.75)
# Predict the response using a specific penalization level
predict(regpath, newdata = freeny[1:5, 2:5],
lambda = regpath$lambda[[1]][[10]])
# Extract the residuals at a certain penalization level
residuals(regpath, lambda = regpath$lambda[[1]][[5]])
# Select penalization level via cross-validation
set.seed(123)
cv_results <- elnet_cv(x, freeny$y, alpha = 0.5,
cv_repl = 10, cv_k = 4)
# Predict the response using the "best" penalization level
predict(cv_results, newdata = freeny[1:5, 2:5])
# Extract the residuals at the "best" penalization level
residuals(cv_results)
# Extract the residuals at a more parsimonious penalization level
residuals(cv_results, lambda = "1.5-se")
}
\seealso{
Other functions for extracting components:
\code{\link{coef.pense_cvfit}()},
\code{\link{coef.pense_fit}()},
\code{\link{predict.pense_fit}()},
\code{\link{residuals.pense_cvfit}()},
\code{\link{residuals.pense_fit}()}
}
\concept{functions for extracting components}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslexp.help.R
\name{fslexp.help}
\alias{fslexp.help}
\title{fslexp Help}
\usage{
fslexp.help(...)
}
\arguments{
\item{...}{passed to \code{\link{fslmaths.help}}}
}
\value{
Prints help output and returns output as character vector
}
\description{
This function calls \code{fslmaths}'s help, as
\code{fslexp} is a wrapper for \code{fslmaths}
}
\examples{
if (have.fsl()){
fslexp.help()
}
}
| /man/fslexp.help.Rd | no_license | muschellij2/fslr | R | false | true | 469 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslexp.help.R
\name{fslexp.help}
\alias{fslexp.help}
\title{fslexp Help}
\usage{
fslexp.help(...)
}
\arguments{
\item{...}{passed to \code{\link{fslmaths.help}}}
}
\value{
Prints help output and returns output as character vector
}
\description{
This function calls \code{fslmaths}'s help, as
\code{fslexp} is a wrapper for \code{fslmaths}
}
\examples{
if (have.fsl()){
fslexp.help()
}
}
|
# US Data
# Forecast US COVID-19 cases and deaths with ARIMA models.
# NOTE(review): relies on dplyr (%>%, filter), forecast (auto.arima,
# autoplot, checkresiduals, accuracy), astsa (sarima.for, presumably) and
# an RMSE helper (e.g. caret::RMSE) being attached elsewhere -- confirm.
kaggle <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/kaggle.csv"), header = TRUE, stringsAsFactors = FALSE)
# Convert data to time series
# keep the country-level US rows (blank Province_State)
train_US <- kaggle %>% filter(Country_Region == "US") %>% filter(Province_State == "")
# columns 6:7 hold the Cases and Deaths series; daily data from day 23 of 2020
tsUS <- ts(train_US[, 6:7], start = c(2020, 23), frequency = 365)
autoplot(tsUS)
# Create train and test data
US_train <- tsUS %>% window(end = c(2020, 120))
# NOTE(review): US_test is created but never used below -- forecast accuracy
# is instead checked against the hard-coded actual_US/actual_USd vectors
US_test <- tsUS %>% window(start = c(2020, 121), end = c(2020, 130))
# Build models for US cases and deaths
# NOTE(review): this first fit uses the FULL series (tsUS) while the two
# fits below use US_train -- possibly unintentional; confirm
fit_casesUS <- auto.arima(tsUS[, "Cases"], stepwise = FALSE, approximation = FALSE)
fit_casesUS # ARIMA(3, 1, 0), AICc - 1941.55
checkresiduals(fit_casesUS) # p-value too low
accuracy(fit_casesUS)
# manually chosen higher-order model, fit on the training window
fit_casesUS2 <- arima(US_train[, "Cases"], order = c(6, 1, 1))
checkresiduals(fit_casesUS2) # still too low, but closest to 0.05
accuracy(fit_casesUS2) # more accurate than (3, 1, 0) model
fit_deathsUS <- auto.arima(US_train[, "Deaths"], stepwise = FALSE, approximation = FALSE)
fit_deathsUS # ARIMA(3, 1, 2), AICc - 1419.15
checkresiduals(fit_deathsUS) # passes
# Use best models to forecast further ahead (refit on the full series)
fc_10_US <- sarima.for(tsUS[, "Cases"], n.ahead = 10, 6, 1, 1)
fc_10_US$pred
actual_US <- c(19710, 18618, 21693, 20832, 27368, 25050, 24994, 18937, 21551, 20260) # actual US cases for 5/10 - 5/19
# normalized RMSE: RMSE divided by the mean of the actuals
RMSE(fc_10_US$pred, actual_US)/mean(actual_US) # 0.10 VERY GOOD
fcd_10_US <- sarima.for(tsUS[, "Deaths"], n.ahead = 10, 3, 1, 2)
fcd_10_US$pred
actual_USd <- c(731, 1156, 1694, 1743, 1779, 1632, 1224, 808, 785, 1574)
RMSE(fcd_10_US$pred, actual_USd)/mean(actual_USd) # 0.53 NOT AS GOOD AS CASES
# models show cases declining while deaths are steady | /Code/Country level/ts_forecasting_US.R | permissive | Reinalynn/Forecasting-COVID-19-Cases-and-Deaths-Using-Time-Series-in-R | R | false | false | 1,685 | r | # US Data
kaggle <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/kaggle.csv"), header = TRUE, stringsAsFactors = FALSE)
# Convert data to time series
train_US <- kaggle %>% filter(Country_Region == "US") %>% filter(Province_State == "")
tsUS <- ts(train_US[, 6:7], start = c(2020, 23), frequency = 365)
autoplot(tsUS)
# Create train and test data
US_train <- tsUS %>% window(end = c(2020, 120))
US_test <- tsUS %>% window(start = c(2020, 121), end = c(2020, 130))
# Build models for US cases and deaths
fit_casesUS <- auto.arima(tsUS[, "Cases"], stepwise = FALSE, approximation = FALSE)
fit_casesUS # ARIMA(3, 1, 0), AICc - 1941.55
checkresiduals(fit_casesUS) # p-value too low
accuracy(fit_casesUS)
fit_casesUS2 <- arima(US_train[, "Cases"], order = c(6, 1, 1))
checkresiduals(fit_casesUS2) # still too low, but closest to 0.05
accuracy(fit_casesUS2) # more accurate than (3, 1, 0) model
fit_deathsUS <- auto.arima(US_train[, "Deaths"], stepwise = FALSE, approximation = FALSE)
fit_deathsUS # ARIMA(3, 1, 2), AICc - 1419.15
checkresiduals(fit_deathsUS) # passes
# Use best models to forecast further ahead
fc_10_US <- sarima.for(tsUS[, "Cases"], n.ahead = 10, 6, 1, 1)
fc_10_US$pred
actual_US <- c(19710, 18618, 21693, 20832, 27368, 25050, 24994, 18937, 21551, 20260) # actual US cases for 5/10 = 5/19
RMSE(fc_10_US$pred, actual_US)/mean(actual_US) # 0.10 VERY GOOD
fcd_10_US <- sarima.for(tsUS[, "Deaths"], n.ahead = 10, 3, 1, 2)
fcd_10_US$pred
actual_USd <- c(731, 1156, 1694, 1743, 1779, 1632, 1224, 808, 785, 1574)
RMSE(fcd_10_US$pred, actual_USd)/mean(actual_USd) # 0.53 NOT AS GOOD AS CASES
# models show cases declining while deaths are steady |
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
# This is a golem development script: the calls below are run interactively
# (not sourced as a whole) to scaffold package files as the app grows.
#
###################################
#### CURRENT FILE: DEV SCRIPT #####
###################################
# Engineering
## Dependencies ----
## Add one line by package you want to add as dependency
usethis::use_package( "dplyr" )
usethis::use_package( "dygraphs" )
usethis::use_package( "ggplot2" )
usethis::use_package( "googlesheets4" )
usethis::use_package( "lubridate" )
usethis::use_package( "shinydashboard" )
usethis::use_package( "shinydashboardPlus" )
usethis::use_package( "simputation" )
usethis::use_package( "plotly" )
usethis::use_package( "readxl" )
usethis::use_package( "xts" )
## Add modules ----
## Create a module infrastructure in R/
## (commented lines below appear to be modules scaffolded previously)
#golem::add_module( name = "electricity_cost_yesterday_text" ) # Name of the module
#golem::add_module( name = "electricity_usage_yesterday_text" ) # Name of the module
#golem::add_module( name = "electricity_usage_plot" ) # Name of the module
#golem::add_module( name = "electricity_total_cost_plot" ) # Name of the module
#golem::add_module( name = "electricity_bills_plot" ) # Name of the module
#golem::add_module( name = "electricity_annual_cost_plot" ) # Name of the module
#golem::add_module( name = "gas_cost_yesterday_text" ) # Name of the module
#golem::add_module( name = "gas_usage_yesterday_text" ) # Name of the module
#golem::add_module( name = "gas_usage_plot" ) # Name of the module
#golem::add_module( name = "gas_total_cost_plot" ) # Name of the module
#golem::add_module( name = "gas_bills_plot" ) # Name of the module
#golem::add_module( name = "gas_annual_cost_plot" ) # Name of the module
golem::add_module( name = "annual_cost_plot" ) # Name of the module
golem::add_module( name = "bills_plot" ) # Name of the module
golem::add_module( name = "total_cost_plot" ) # Name of the module
golem::add_module( name = "usage_plot" ) # Name of the module
golem::add_module( name = "readings_plot" ) # Name of the module
golem::add_module( name = "usage_trend_plot" ) # Name of the module
golem::add_module( name = "var_select" ) # Name of the module
golem::add_module( name = "cum_usage_plot" ) # Name of the module
## Add helper functions ----
## Creates fct_* and utils_* files (see golem::add_fct / golem::add_utils)
golem::add_fct( "helpers" )
golem::add_utils( "helpers" )
## External resources
## Creates .js and .css files at inst/app/www
golem::add_js_file( "script" )
golem::add_js_handler( "handlers" )
golem::add_css_file( "custom" )
## Add internal datasets ----
## If you have data in your package
usethis::use_data_raw( name = "energy", open = FALSE )
## Tests ----
## Add one line by test you want to create
usethis::use_test( "app" )
# Documentation
## Vignette ----
usethis::use_vignette("energyuse")
devtools::build_vignettes()
## Code coverage ----
## (You'll need GitHub there)
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()
# You're now set! ----
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
| /dev/02_dev.R | permissive | MHenderson/energy-use | R | false | false | 3,260 | r | # Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
###################################
#### CURRENT FILE: DEV SCRIPT #####
###################################
# Engineering
## Dependencies ----
## Add one line by package you want to add as dependency
usethis::use_package( "dplyr" )
usethis::use_package( "dygraphs" )
usethis::use_package( "ggplot2" )
usethis::use_package( "googlesheets4" )
usethis::use_package( "lubridate" )
usethis::use_package( "shinydashboard" )
usethis::use_package( "shinydashboardPlus" )
usethis::use_package( "simputation" )
usethis::use_package( "plotly" )
usethis::use_package( "readxl" )
usethis::use_package( "xts" )
## Add modules ----
## Create a module infrastructure in R/
#golem::add_module( name = "electricity_cost_yesterday_text" ) # Name of the module
#golem::add_module( name = "electricity_usage_yesterday_text" ) # Name of the module
#golem::add_module( name = "electricity_usage_plot" ) # Name of the module
#golem::add_module( name = "electricity_total_cost_plot" ) # Name of the module
#golem::add_module( name = "electricity_bills_plot" ) # Name of the module
#golem::add_module( name = "electricity_annual_cost_plot" ) # Name of the module
#golem::add_module( name = "gas_cost_yesterday_text" ) # Name of the module
#golem::add_module( name = "gas_usage_yesterday_text" ) # Name of the module
#golem::add_module( name = "gas_usage_plot" ) # Name of the module
#golem::add_module( name = "gas_total_cost_plot" ) # Name of the module
#golem::add_module( name = "gas_bills_plot" ) # Name of the module
#golem::add_module( name = "gas_annual_cost_plot" ) # Name of the module
golem::add_module( name = "annual_cost_plot" ) # Name of the module
golem::add_module( name = "bills_plot" ) # Name of the module
golem::add_module( name = "total_cost_plot" ) # Name of the module
golem::add_module( name = "usage_plot" ) # Name of the module
golem::add_module( name = "readings_plot" ) # Name of the module
golem::add_module( name = "usage_trend_plot" ) # Name of the module
golem::add_module( name = "var_select" ) # Name of the module
golem::add_module( name = "cum_usage_plot" ) # Name of the module
## Add helper functions ----
## Creates ftc_* and utils_*
golem::add_fct( "helpers" )
golem::add_utils( "helpers" )
## External resources
## Creates .js and .css files at inst/app/www
golem::add_js_file( "script" )
golem::add_js_handler( "handlers" )
golem::add_css_file( "custom" )
## Add internal datasets ----
## If you have data in your package
usethis::use_data_raw( name = "energy", open = FALSE )
## Tests ----
## Add one line by test you want to create
usethis::use_test( "app" )
# Documentation
## Vignette ----
usethis::use_vignette("energyuse")
devtools::build_vignettes()
## Code coverage ----
## (You'll need GitHub there)
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()
# You're now set! ----
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.