content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
set_makevars <- function(envs, path = file.path("~", ".R", "Makevars")) {
  # Set (or update) environment-variable definitions in the user's
  # ~/.R/Makevars file, keeping a backup so reset_makevars() can restore it.
  #
  # Args:
  #   envs: named character vector of VAR = value pairs to write.
  #   path: path of the Makevars file to modify.
  #
  # Returns: the previous contents of the file as a character vector, or
  #   NULL if the file did not exist.
  if (length(envs) == 0) {
    return()
  }
  stopifnot(is.named(envs))
  old <- NULL
  if (file.exists(path)) {
    lines <- readLines(path)
    old <- lines
    for (env in names(envs)) {
      # Find an existing "VAR =" definition (leading whitespace allowed).
      loc <- grep(rex::rex(start, any_spaces, env, any_spaces, "="), lines)
      if (length(loc) == 0) {
        lines <- append(lines, paste(sep = "=", env, envs[env]))
      } else if (length(loc) == 1) {
        lines[loc] <- paste(sep = "=", env, envs[env])
      } else {
        # BUG FIX: stop()'s argument is `call.`, not `.call` -- the old
        # spelling appended "FALSE" to the error message instead of
        # suppressing the call in the error output.
        stop("Multiple results for ", env, " found, something is wrong.", call. = FALSE)
      }
    }
  } else {
    lines <- paste(names(envs), envs, sep = "=")
  }
  dir.create(file.path("~", ".R"), showWarnings = FALSE, recursive = TRUE)
  if (!identical(old, lines)) {
    # Back up the original file so reset_makevars() can restore it later.
    file.rename(path, backup_name(path))
    writeLines(con = path, lines)
  }
  old
}
reset_makevars <- function(path = file.path("~", ".R", "Makevars")) {
  # Restore the Makevars file from the backup written by set_makevars(),
  # if such a backup exists; otherwise do nothing.
  backup <- backup_name(path)
  if (file.exists(backup)) {
    file.rename(backup, path)
  }
}
backup_name <- function(file) {
  # Derive the backup filename used by set_makevars()/reset_makevars():
  # the original path with ".bak" appended.
  sprintf("%s.bak", file)
}
| /R/makevars.R | no_license | johnsdp/covr | R | false | false | 1,123 | r | set_makevars <- function(envs, path = file.path("~", ".R", "Makevars")) {
if (length(envs) == 0) {
return()
}
stopifnot(is.named(envs))
old <- NULL
if (file.exists(path)) {
lines <- readLines(path)
old <- lines
for (env in names(envs)) {
loc <- grep(rex::rex(start, any_spaces, env, any_spaces, "="), lines)
if (length(loc) == 0) {
lines <- append(lines, paste(sep = "=", env, envs[env]))
} else if(length(loc) == 1) {
lines[loc] <- paste(sep = "=", env, envs[env])
} else {
stop("Multiple results for ", env, " found, something is wrong.", .call = FALSE)
}
}
} else {
lines <- paste(names(envs), envs, sep = "=")
}
dir.create(file.path("~", ".R"), showWarnings = FALSE, recursive = TRUE)
if (!identical(old, lines)) {
file.rename(path, backup_name(path))
writeLines(con = path, lines)
}
old
}
reset_makevars <- function(path = file.path("~", ".R", "Makevars")) {
  # Restores the original Makevars file from the ".bak" backup created by
  # set_makevars(), if such a backup exists; otherwise does nothing.
  if (file.exists(backup_name(path))) {
    file.rename(backup_name(path), path)
  }
}
backup_name <- function(file) {
  # Returns the backup filename for `file`: the path with ".bak" appended.
  paste0(file, ".bak")
}
|
# Per-run test-set scores for the two models being compared
# (consumed by the t.test() call further down this script).
data_PI_test <- c(
  0.6522499918937683, 0.6427500247955322, 0.6375687718391418,
  0.6438938975334167, 0.6309999823570251, 0.6365000009536743,
  0.6397500038146973, 0.6406015157699585
)
data_fusion_test <- c(
  0.668500695591407, 0.6526631821346569, 0.6380345198058224,
  0.6514943237302898, 0.6389097456009157, 0.6238589609311762,
  0.6474118629569678, 0.639614867891269
)
t.test(data_PI_test,data_fusion_test) | /Codigos R/Test SE t-student.R | no_license | fondef-car-robbery/POI-Imagenes | R | false | false | 688 | r | data_PI_test <- c(0.6522499918937683,
0.6427500247955322,
0.6375687718391418,
0.6438938975334167,
0.6309999823570251,
0.6365000009536743,
0.6397500038146973,
0.6406015157699585)
# Per-run test-set scores for the fusion model.
data_fusion_test <- c(0.668500695591407,
                      0.6526631821346569,
                      0.6380345198058224,
                      0.6514943237302898,
                      0.6389097456009157,
                      0.6238589609311762,
                      0.6474118629569678,
                      0.639614867891269)
# Welch two-sample t-test comparing the PI model scores (data_PI_test,
# defined earlier in this script) against the fusion model scores.
t.test(data_PI_test,data_fusion_test)
#' Export ggplot
#'
#' This function exports a ggplot2 object to a pdf, eps and png file. Code credited to https://gist.github.com/sheymann/2399659
#' @param gplot ggplot2 object
#' @param filename character string; output path without extension
#' @param width width of plot in inches
#' @param height height of plot in inches
#' @keywords ggplot2 pdf png eps
#' @export
#' @examples
#' \dontrun{
#' ExportPlot(my_ggplot, "myplot")
#' }
ExportPlot <- function(gplot, filename, width = 2, height = 1.5) {
  # Export plot in PDF and EPS.
  # (For reference, A4 is width = 11.69, height = 8.27.)
  ggsave(paste0(filename, ".pdf"), gplot, width = width, height = height)
  # EPS: open a postscript device, render, then close it to flush the file.
  postscript(file = paste0(filename, ".eps"), width = width, height = height)
  print(gplot)
  dev.off()
  # PNG at 100 pixels per inch. NOTE(review): the output name keeps a
  # trailing underscore ("<filename>_.png") for backward compatibility.
  png(file = paste0(filename, "_.png"), width = width * 100, height = height * 100)
  print(gplot)
  dev.off()
}
| /R/exportPlot.R | no_license | nzwormgirl/Amy | R | false | false | 860 | r | #' Export ggplot
#'
#' This function exports a ggplot2 object to a pdf, eps and png file. Code credited to https://gist.github.com/sheymann/2399659
#' @param gplot ggplot2 object
#' @param filename character string
#' @param width width of plot in inches
#' @param height height of plot in inches
#' @keywords ggplot2 pdf png eps
#' @export
#' @examples
#' \dontrun{ExportPlot(my_ggplot, "myplot")}
## Exports the given ggplot object to <filename>.pdf, .eps and _.png
ExportPlot <- function(gplot, filename, width=2, height=1.5) {
  # Export plot in PDF and EPS.
  # Notice that A4: width=11.69, height=8.27
  # PDF written directly via ggsave.
  ggsave(paste(filename, '.pdf', sep=""), gplot, width = width, height = height)
  # EPS: open a postscript device, render the plot, close to flush the file.
  postscript(file = paste(filename, '.eps', sep=""), width = width, height = height)
  print(gplot)
  dev.off()
  # PNG at 100 px per inch; note the filename gains a trailing underscore.
  png(file = paste(filename, '_.png', sep=""), width = width * 100, height = height * 100)
  print(gplot)
  dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_params.R
\name{plot_params}
\alias{plot_params}
\title{Plot parameter histograms}
\usage{
plot_params(year, model, model_name)
}
\arguments{
\item{year}{model year}
\item{model}{folder the model is in}
\item{model_name}{e.g., goa_nr_2020}
}
\value{
}
\description{
Plot parameter histograms
}
| /man/plot_params.Rd | no_license | BenWilliams-NOAA/rockfishr | R | false | true | 378 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_params.R
\name{plot_params}
\alias{plot_params}
\title{Plot parameter histograms}
\usage{
plot_params(year, model, model_name)
}
\arguments{
\item{year}{model year}
\item{model}{folder the model is in}
\item{model_name}{e.g., goa_nr_2020}
}
\value{
}
\description{
Plot parameter histograms
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 918
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 918
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_eequery_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 327
c no.of clauses 918
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 918
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_eequery_1344.qdimacs 327 918 E1 [] 0 4 323 918 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_eequery_1344/exquery_eequery_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 701 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 918
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 918
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_eequery_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 327
c no.of clauses 918
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 918
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_eequery_1344.qdimacs 327 918 E1 [] 0 4 323 918 NONE
|
# Trilateration: estimate the unknown (x, y) position of a point from
# distance (radius) measurements taken at five known beacon positions.
require(plotrix)
# Beacon coordinates and the measured distance from each beacon.
xvals = c(-1.9137300, -0.9354530, 0.6309640, 0.3108570, 0.0431084)
yvals = c(-0.799904, -0.493735, -0.653075, -0.018258, 1.253210)
rvals = c(2.040010, 0.959304, 0.728477, 0.301885, 1.190120)
sample = data.frame(x=xvals, y=yvals, radius=rvals)
#
# maybe needed require(RCurl)
#temporaryFile <- tempfile()
#download.file("http://localhost:4000/trilaterate.csv",destfile=temporaryFile, method="curl")
#read.csv(temporaryFile)
# sample data set
# x y radius error
# 1 -1.9137300 -0.799904 2.040010 0.2402898
# 2 -0.9354530 -0.493735 0.959304 1.0866445
# 3 0.6309640 -0.653075 0.728477 1.8843792
# 4 0.3108570 -0.018258 0.301885 10.9727866
# 5 0.0431084 1.253210 1.190120 0.7060224
# sample <- read.csv("sample.csv")
# Weight each observation by 1/r^2, so smaller radii get larger weights.
errors <- 1/sample$radius^2
# Nonlinear least squares: solve for the centre (x0, y0) whose Euclidean
# distance to each beacon best matches the measured radii. The first
# beacon's coordinates seed the optimisation.
fit <- nls(radius ~ sqrt(abs((x-x0))^2+abs((y-y0))^2), data = sample,
  start=list(x0=sample[1,]$x, y0=sample[1,]$y), weights = errors)
solution <- summary(fit)$coefficients[,1]
print(solution)
print(summary(fit))
# Visualise: beacon positions as points, one circle per distance measurement.
plot(sample$x, sample$y)
for(i in 1:nrow(sample)) {
  row <- sample[i,]
  draw.circle(row$x, row$y, row$radius)
}
# NLS position estimate marked in red.
draw.circle(solution["x0"], solution["y0"], 0.02,col="red")
# Simple weighted-centroid estimate (blue) for comparison.
# NOTE(review): the numerator weights by 1/radius but the denominator sums
# radius itself -- confirm this is intended (a conventional weighted mean
# would divide by sum(1/radius)).
x_w <- sum(sample$x*(1/sample$radius)) / sum(sample$radius)
y_w <- sum(sample$y*(1/sample$radius)) / sum(sample$radius)
draw.circle(x_w, y_w, 0.02,col="blue")
print(x_w)
print(y_w)
rm(i)
rm(row)
print(summary(fit)) | /server/r/trilaterate.R | no_license | teocci/witrack | R | false | false | 1,424 | r | require(plotrix)
xvals = c(-1.9137300, -0.9354530, 0.6309640, 0.3108570, 0.0431084)
yvals = c(-0.799904, -0.493735, -0.653075, -0.018258, 1.253210)
rvals = c(2.040010, 0.959304, 0.728477, 0.301885, 1.190120)
sample = data.frame(x=xvals, y=yvals, radius=rvals)
#
# maybe needed require(RCurl)
#temporaryFile <- tempfile()
#download.file("http://localhost:4000/trilaterate.csv",destfile=temporaryFile, method="curl")
#read.csv(temporaryFile)
# sample data set
# x y radius error
# 1 -1.9137300 -0.799904 2.040010 0.2402898
# 2 -0.9354530 -0.493735 0.959304 1.0866445
# 3 0.6309640 -0.653075 0.728477 1.8843792
# 4 0.3108570 -0.018258 0.301885 10.9727866
# 5 0.0431084 1.253210 1.190120 0.7060224
# sample <- read.csv("sample.csv")
errors <- 1/sample$radius^2
fit <- nls(radius ~ sqrt(abs((x-x0))^2+abs((y-y0))^2), data = sample,
start=list(x0=sample[1,]$x, y0=sample[1,]$y), weights = errors)
solution <- summary(fit)$coefficients[,1]
print(solution)
print(summary(fit))
plot(sample$x, sample$y)
for(i in 1:nrow(sample)) {
row <- sample[i,]
draw.circle(row$x, row$y, row$radius)
}
draw.circle(solution["x0"], solution["y0"], 0.02,col="red")
x_w <- sum(sample$x*(1/sample$radius)) / sum(sample$radius)
y_w <- sum(sample$y*(1/sample$radius)) / sum(sample$radius)
draw.circle(x_w, y_w, 0.02,col="blue")
print(x_w)
print(y_w)
rm(i)
rm(row)
print(summary(fit)) |
# Get more colors!
palette(c("black", "green", "blue", "cyan", "magenta", "yellow", "grey", "purple", "brown", "violet", "pink", "orange", "darkgreen", "khaki", "darkorange", "red"))
# Black will be used for vectors classified as noise (cluster 0 for dbscan)
#### Start ####
# install.packages("BBmisc")
# install.packages("ppclust")
# install.packages("fclust")
# install.packages("dbscan")
# install.packages("EMCluster")
# install.packages("advclust")
library(EMCluster)
library(BBmisc)
library(ppclust)
library(fclust)
library(dbscan)
library(advclust)
#### Datasets ####
file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/2d_k15_little_overlap.txt"
data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
na.strings = "", stringsAsFactors = F)
data = normalize(data, method = "standardize", range = c(0, 1))
str(data)
summary(data)
plot(data, pch = 16, cex = .3)
# file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/2d_k15_much_overlap.txt"
# data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
# na.strings = "", stringsAsFactors = F)
# data = normalize(data, method = "standardize", range = c(0, 1))
# plot(data, pch = 16, cex = .3)
#
# file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/5d_k2_thyroid.txt"
# data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
# na.strings = "", stringsAsFactors = F)
# data = normalize(data, method = "standardize", range = c(0, 1))
# plot(data, pch = 16, cex = .3)
#plot(data, col = res.pcm$cluster, pch = 16, cex = .5)
#### HCM ####
res.hcm <- hcm(data, centers = 15, iter.max = 100, dmetric = "euclidean")
hullplot(data, res.hcm, pch = 16, cex = .5, main = "HCM with dmetric = euclidean")
#### FCM ####
# res.fcm <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "sqeuclidean")
# hullplot(data, res.fcm, pch = 16, cex = .5, main = "")
res.fcm.m2dmetricsqeuclidean <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "sqeuclidean")
hullplot(data, res.fcm.m2dmetricsqeuclidean, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = sqeuclidean")
res.fcm.m2dmetriceuclidean <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "euclidean")
hullplot(data, res.fcm.m2dmetriceuclidean, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = euclidean")
res.fcm.m2dmetriccorrelation <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "correlation")
hullplot(data, res.fcm.m2dmetriccorrelation, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = correlation")
res.fcm.m4dmetricsqeuclidean <- fcm(data, centers = 15, iter.max = 100, m = 4, dmetric = "sqeuclidean")
hullplot(data, res.fcm.m4dmetricsqeuclidean, pch = 16, cex = .5, main = "FCM with m = 4, dmetric = sqeuclidean")
res.fcm.m2dmetricdivergence <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "divergence")
hullplot(data, res.fcm.m2dmetricdivergence, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = divergence")
res.fcm.m2dmetricmanhattan <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "manhattan")
hullplot(data, res.fcm.m2dmetricmanhattan, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = manhattan")
#### PCM ####
# res.pcm <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 1, dmetric = "sqeuclidean")
# hullplot(data, res.pcm, pch = 16, cex = .5, main = "")
res.pcm.eta2K1oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K1oftype1, pch = 16, cex = .5, main = "PCM with eta = 2, K = 1, oftype = 1")
res.pcm.eta4K1oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 4, K = 1, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta4K1oftype1, pch = 16, cex = .5, main = "PCM with eta = 4, K = 1, oftype = 1")
res.pcm.eta2K2oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 2, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K2oftype1, pch = 16, cex = .5, main = "PCM with eta = 2, K = 2, oftype = 1")
res.pcm.eta2K1oftype2 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 2, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K1oftype2, pch = 16, cex = .5, main = "PCM with eta = 2, K = 1, oftype = 2")
#### FMLE ####
# Gath-Geva / fuzzy maximum likelihood estimation clustering (ppclust::gg)
# at three parameter settings, each followed by a hull plot of its result.
res.fmle.m2ggversionsimple <- gg(data, centers = 15, iter.max = 5, m = 2, ggversion = "simple")
hullplot(data, res.fmle.m2ggversionsimple, pch = 16, cex = .5, main = "GG-FMLE with m = 2, ggversion = simple")
res.fmle.m4ggversionsimple <- gg(data, centers = 15, iter.max = 5, m = 4, ggversion = "simple")
# BUG FIX: plot the m = 4 result (the old code re-plotted the m = 2 object).
hullplot(data, res.fmle.m4ggversionsimple, pch = 16, cex = .5, main = "GG-FMLE with m = 4, ggversion = simple")
res.fmle.m2ggversionoriginal <- gg(data, centers = 15, iter.max = 5, m = 2, ggversion = "original")
# BUG FIX: plot the "original" ggversion result, not the "simple" one.
hullplot(data, res.fmle.m2ggversionoriginal, pch = 16, cex = .5, main = "GG-FMLE with m = 2, ggversion = original")
#### EM ####
set.seed(1234)
emobj <- simple.init(data, nclass = 15)
emobj <- emcluster(data, emobj, assign.class = TRUE)
plotem(emobj, data, color.pch=16, main="EM cluster")
#### GK ####
# res.gk <- gk(data, centers = 15, iter.max = 100, m = 2)
# hullplot(data, res.gk, pch = 16, cex = .5, main = "")
# plot(data, col = res.gk$cluster, pch = 16, cex = .5)
# ppclust implementation seems broken!
res.gk.ppclust.m2 <- gk(data, centers = 15, iter.max = 100, m = 2)
hullplot(data, res.gk.ppclust.m2, pch = 16, cex = .5, main = "GK with m = 2")
plot(data, col = res.gk.ppclust.m2$cluster, pch = 16, cex = .5)
# Use fclust instead of ppclust
res.gk.fclust.m2 = FKM.gk(data, k = 15, maxit = 100, m = 2)
# hullplot doesn't work because FKM.gk produces an object of class with a member called "clus" instead of "cluster"...
# hullplot(data, res.gk.fclust.m2, pch = 16, cex = .5, main = "")
plot(data, col = res.gk.fclust.m2$clus, pch = 16, cex = .5)
res.gk.fclust.m4 = FKM.gk(data, k = 15, maxit = 100, m = 4)
plot(data, col = res.gk.fclust.m4$clus, pch = 16, cex = .5)
#### DBSCAN ####
# Use k = minPts used later for dbscan. To find eps, find the knee on the following graph
kNNdistplot(data, k = 15)
# And so seems that eps = .08
abline(h = .08, col = "red", lty = 2)
res.dbscan.eps008minPts4 <- dbscan(data, borderPoints = TRUE, eps = .08, minPts = 4)
hullplot(data, res.dbscan.eps008minPts4, pch = 16, cex = .5, main = "DBSCAN with eps = .08, minPts = 4")
# pairs(data, col = res.dbscan.eps008minPts4$cluster + 1)
kNNdistplot(data, k = 15)
# And so seems that eps = .075
abline(h = .075, col = "red", lty = 2)
res.dbscan.eps008minPts12 <- dbscan(data, borderPoints = TRUE, eps = .08, minPts = 12)
hullplot(data, res.dbscan.eps008minPts12, pch = 16, cex = .5, main = "DBSCAN with eps = .08, minPts = 12")
# pairs() useful to scatterplot when dimentions > 2
# pairs(data, col = res.dbscan.eps0075minPts10$eps008minPts10 + 1)
# See what happens with eps = 0.15
res.dbscan.eps015minPts10 <- dbscan(data, borderPoints = TRUE, eps = .15, minPts = 10)
hullplot(data, res.dbscan.eps015minPts10, pch = 16, cex = .5, main = "DBSCAN with eps = .15, minPts = 10")
# Predicting new data's classification
newdata <- c(1.5, 2)
predict(res.dbscan.eps008minPts12, newdata, data = data)
#### BIRCH ####
######## CRASHES :-( ########
# To install a package from a local tar.
# A (deprecated) BIRCH implementation can be found here: https://cran.r-project.org/web/packages/birch/
# install.packages(path_to_tar, repos = NULL, type="source")
# Install Rtools so the downloaded package can be built: https://cran.r-project.org/bin/windows/Rtools/Rtools35.exe
# install.packages("ellipse")
# install.packages("C:\\Users\\PC\\Desktop\\Studia\\R\\4GL_Zad2\\birch_1.2-3.tar.gz", repos = NULL, type="source")
# datamatrix = as.matrix(data)
# res.birch.radius01compact01 <- birch(datamatrix, keeptree = TRUE, radius = .1, compact = .1)
# birchobj <- birch.getTree(res.birch.radius01compact01)
######## HCLUST ########
hclust.dist.methodeuclidean <- dist(data, method = "euclidean")
res.hclust.methodwardD <- hclust(hclust.dist.methodeuclidean, method = "ward.D")
plot(res.hclust.methodwardD)
rect.hclust(res.hclust.methodwardD, k = 15)
res.hclust.methodwardD.groups <- cutree(res.hclust.methodwardD, k = 15)
hullplot(data, res.hclust.methodwardD.groups, pch = 16, cex = .3, main = "Hclust Dendrogram with k = 15, method = ward.D")
hclust.dist.methodeuclidean <- dist(data, method = "euclidean")
res.hclust.methodmedian <- hclust(hclust.dist.methodeuclidean, method = "median")
plot(res.hclust.methodmedian)
rect.hclust(res.hclust.methodmedian, k = 15)
res.hclust.methodmedian.groups <- cutree(res.hclust.methodmedian, k = 15)
hullplot(data, res.hclust.methodmedian.groups, pch = 16, cex = .3, main = "Hclust Dendrogram with k = 15, method = median")
| /4GL/clusters.R | no_license | kontrybutor/tsbd_sem2 | R | false | false | 8,790 | r |
# Get more colors!
palette(c("black", "green", "blue", "cyan", "magenta", "yellow", "grey", "purple", "brown", "violet", "pink", "orange", "darkgreen", "khaki", "darkorange", "red"))
# Black will be used for vectors classified as noise (cluster 0 for dbscan)
#### Start ####
# install.packages("BBmisc")
# install.packages("ppclust")
# install.packages("fclust")
# install.packages("dbscan")
# install.packages("EMCluster")
# install.packages("advclust")
library(EMCluster)
library(BBmisc)
library(ppclust)
library(fclust)
library(dbscan)
library(advclust)
#### Datasets ####
file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/2d_k15_little_overlap.txt"
data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
na.strings = "", stringsAsFactors = F)
data = normalize(data, method = "standardize", range = c(0, 1))
str(data)
summary(data)
plot(data, pch = 16, cex = .3)
# file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/2d_k15_much_overlap.txt"
# data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
# na.strings = "", stringsAsFactors = F)
# data = normalize(data, method = "standardize", range = c(0, 1))
# plot(data, pch = 16, cex = .3)
#
# file <- "/Users/rafal/Documents/Studia/4GL/4GL_Zad2/Datasets/5d_k2_thyroid.txt"
# data <- read.table(file = file, sep = "" , header = F , nrows = 5000,
# na.strings = "", stringsAsFactors = F)
# data = normalize(data, method = "standardize", range = c(0, 1))
# plot(data, pch = 16, cex = .3)
#plot(data, col = res.pcm$cluster, pch = 16, cex = .5)
#### HCM ####
res.hcm <- hcm(data, centers = 15, iter.max = 100, dmetric = "euclidean")
hullplot(data, res.hcm, pch = 16, cex = .5, main = "HCM with dmetric = euclidean")
#### FCM ####
# res.fcm <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "sqeuclidean")
# hullplot(data, res.fcm, pch = 16, cex = .5, main = "")
res.fcm.m2dmetricsqeuclidean <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "sqeuclidean")
hullplot(data, res.fcm.m2dmetricsqeuclidean, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = sqeuclidean")
res.fcm.m2dmetriceuclidean <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "euclidean")
hullplot(data, res.fcm.m2dmetriceuclidean, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = euclidean")
res.fcm.m2dmetriccorrelation <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "correlation")
hullplot(data, res.fcm.m2dmetriccorrelation, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = correlation")
res.fcm.m4dmetricsqeuclidean <- fcm(data, centers = 15, iter.max = 100, m = 4, dmetric = "sqeuclidean")
hullplot(data, res.fcm.m4dmetricsqeuclidean, pch = 16, cex = .5, main = "FCM with m = 4, dmetric = sqeuclidean")
res.fcm.m2dmetricdivergence <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "divergence")
hullplot(data, res.fcm.m2dmetricdivergence, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = divergence")
res.fcm.m2dmetricmanhattan <- fcm(data, centers = 15, iter.max = 100, m = 2, dmetric = "manhattan")
hullplot(data, res.fcm.m2dmetricmanhattan, pch = 16, cex = .5, main = "FCM with m = 2, dmetric = manhattan")
#### PCM ####
# res.pcm <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 1, dmetric = "sqeuclidean")
# hullplot(data, res.pcm, pch = 16, cex = .5, main = "")
res.pcm.eta2K1oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K1oftype1, pch = 16, cex = .5, main = "PCM with eta = 2, K = 1, oftype = 1")
res.pcm.eta4K1oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 4, K = 1, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta4K1oftype1, pch = 16, cex = .5, main = "PCM with eta = 4, K = 1, oftype = 1")
res.pcm.eta2K2oftype1 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 2, oftype = 1, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K2oftype1, pch = 16, cex = .5, main = "PCM with eta = 2, K = 2, oftype = 1")
res.pcm.eta2K1oftype2 <- pcm(data, centers = 15, iter.max = 100, eta = 2, K = 1, oftype = 2, dmetric = "sqeuclidean")
hullplot(data, res.pcm.eta2K1oftype2, pch = 16, cex = .5, main = "PCM with eta = 2, K = 1, oftype = 2")
#### FMLE ####
# Gath-Geva / fuzzy maximum likelihood estimation clustering (ppclust::gg)
# at three parameter settings, each followed by a hull plot of its result.
res.fmle.m2ggversionsimple <- gg(data, centers = 15, iter.max = 5, m = 2, ggversion = "simple")
hullplot(data, res.fmle.m2ggversionsimple, pch = 16, cex = .5, main = "GG-FMLE with m = 2, ggversion = simple")
res.fmle.m4ggversionsimple <- gg(data, centers = 15, iter.max = 5, m = 4, ggversion = "simple")
# BUG FIX: plot the m = 4 result (the old code re-plotted the m = 2 object).
hullplot(data, res.fmle.m4ggversionsimple, pch = 16, cex = .5, main = "GG-FMLE with m = 4, ggversion = simple")
res.fmle.m2ggversionoriginal <- gg(data, centers = 15, iter.max = 5, m = 2, ggversion = "original")
# BUG FIX: plot the "original" ggversion result, not the "simple" one.
hullplot(data, res.fmle.m2ggversionoriginal, pch = 16, cex = .5, main = "GG-FMLE with m = 2, ggversion = original")
#### EM ####
set.seed(1234)
emobj <- simple.init(data, nclass = 15)
emobj <- emcluster(data, emobj, assign.class = TRUE)
plotem(emobj, data, color.pch=16, main="EM cluster")
#### GK ####
# res.gk <- gk(data, centers = 15, iter.max = 100, m = 2)
# hullplot(data, res.gk, pch = 16, cex = .5, main = "")
# plot(data, col = res.gk$cluster, pch = 16, cex = .5)
# ppclust implementation seems broken!
res.gk.ppclust.m2 <- gk(data, centers = 15, iter.max = 100, m = 2)
hullplot(data, res.gk.ppclust.m2, pch = 16, cex = .5, main = "GK with m = 2")
plot(data, col = res.gk.ppclust.m2$cluster, pch = 16, cex = .5)
# Use fclust instead of ppclust
res.gk.fclust.m2 = FKM.gk(data, k = 15, maxit = 100, m = 2)
# hullplot doesn't work because FKM.gk produces an object of class with a member called "clus" instead of "cluster"...
# hullplot(data, res.gk.fclust.m2, pch = 16, cex = .5, main = "")
plot(data, col = res.gk.fclust.m2$clus, pch = 16, cex = .5)
res.gk.fclust.m4 = FKM.gk(data, k = 15, maxit = 100, m = 4)
plot(data, col = res.gk.fclust.m4$clus, pch = 16, cex = .5)
#### DBSCAN ####
# Use k = minPts used later for dbscan. To find eps, find the knee on the following graph
kNNdistplot(data, k = 15)
# And so seems that eps = .08
abline(h = .08, col = "red", lty = 2)
res.dbscan.eps008minPts4 <- dbscan(data, borderPoints = TRUE, eps = .08, minPts = 4)
hullplot(data, res.dbscan.eps008minPts4, pch = 16, cex = .5, main = "DBSCAN with eps = .08, minPts = 4")
# pairs(data, col = res.dbscan.eps008minPts4$cluster + 1)
kNNdistplot(data, k = 15)
# And so seems that eps = .075
abline(h = .075, col = "red", lty = 2)
res.dbscan.eps008minPts12 <- dbscan(data, borderPoints = TRUE, eps = .08, minPts = 12)
hullplot(data, res.dbscan.eps008minPts12, pch = 16, cex = .5, main = "DBSCAN with eps = .08, minPts = 12")
# pairs() useful to scatterplot when dimentions > 2
# pairs(data, col = res.dbscan.eps0075minPts10$eps008minPts10 + 1)
# See what happens with eps = 0.15
res.dbscan.eps015minPts10 <- dbscan(data, borderPoints = TRUE, eps = .15, minPts = 10)
hullplot(data, res.dbscan.eps015minPts10, pch = 16, cex = .5, main = "DBSCAN with eps = .15, minPts = 10")
# Predicting new data's classification
newdata <- c(1.5, 2)
predict(res.dbscan.eps008minPts12, newdata, data = data)
#### BIRCH ####
######## CRASHES :-( ########
# To install a package from a local tar.
# A (deprecated) BIRCH implementation can be found here: https://cran.r-project.org/web/packages/birch/
# install.packages(path_to_tar, repos = NULL, type="source")
# Install Rtools so the downloaded package can be built: https://cran.r-project.org/bin/windows/Rtools/Rtools35.exe
# install.packages("ellipse")
# install.packages("C:\\Users\\PC\\Desktop\\Studia\\R\\4GL_Zad2\\birch_1.2-3.tar.gz", repos = NULL, type="source")
# datamatrix = as.matrix(data)
# res.birch.radius01compact01 <- birch(datamatrix, keeptree = TRUE, radius = .1, compact = .1)
# birchobj <- birch.getTree(res.birch.radius01compact01)
######## HCLUST ########
hclust.dist.methodeuclidean <- dist(data, method = "euclidean")
res.hclust.methodwardD <- hclust(hclust.dist.methodeuclidean, method = "ward.D")
plot(res.hclust.methodwardD)
rect.hclust(res.hclust.methodwardD, k = 15)
res.hclust.methodwardD.groups <- cutree(res.hclust.methodwardD, k = 15)
hullplot(data, res.hclust.methodwardD.groups, pch = 16, cex = .3, main = "Hclust Dendrogram with k = 15, method = ward.D")
hclust.dist.methodeuclidean <- dist(data, method = "euclidean")
res.hclust.methodmedian <- hclust(hclust.dist.methodeuclidean, method = "median")
plot(res.hclust.methodmedian)
rect.hclust(res.hclust.methodmedian, k = 15)
res.hclust.methodmedian.groups <- cutree(res.hclust.methodmedian, k = 15)
hullplot(data, res.hclust.methodmedian.groups, pch = 16, cex = .3, main = "Hclust Dendrogram with k = 15, method = median")
|
# NOTE(review): setwd() in a script is fragile -- this path only exists on
# the author's machine; prefer passing the CSV path directly to read.csv().
setwd("~/terrorism/")
# Load the Global Terrorism Database CSV from the working directory.
df <- read.csv("globalterrorismdb_0617dist.csv")
| /supplementary scripts/mix of various R scripts/terrorism.R | no_license | aprezvykh/bioinformatics | R | false | false | 71 | r | setwd("~/terrorism/")
df <- read.csv("globalterrorismdb_0617dist.csv")
|
# Script: bjn20_HPC_2020_main.R
#
# Desc: CMEE 2020 HPC excercises R code main proforma
#
# Arguments:
# -
#
# Output:
# -
#
# Date: 21 Dec 2020
#
name <- "Ben Nouhan"
preferred_name <- "Ben"
email <- "bjn20@ic.ac.uk"
username <- "bjn20"
personal_speciation_rate <- 0.0052206
# Question 1
species_richness <- function(community) {
  # Count the number of distinct species present in a community.
  #
  # Args:
  #   community: vector of positive integers; each element is the species
  #     identity of one individual.
  #
  # Returns: integer; the number of unique species identities present.
  distinct_species <- unique(community)
  length(distinct_species)
}
# Question 2
init_community_max <- function(size) {
  # Generate the maximally diverse initial community: every individual is a
  # distinct species (species ids 1..size).
  #
  # Args:
  #   size: non-negative integer; number of individuals in the community.
  #
  # Returns: integer vector 1..size (empty when size is 0).
  # BUG FIX: seq_len() instead of 1:size, so that size = 0 yields an empty
  # community rather than the erroneous c(1, 0) produced by 1:0.
  seq_len(size)
}
# Question 3
init_community_min <- function(size) {
  # Generate the minimally diverse initial community: every individual
  # belongs to species 1.
  #
  # Args:
  #   size: non-negative integer; number of individuals in the community.
  #
  # Returns: numeric vector of 1s with length `size`.
  rep(1, times = size)
}
# Question 4
choose_two <- function(max_value) {
  # Draw two distinct values uniformly at random from 1..max_value
  # (sampling without replacement).
  #
  # Args:
  #   max_value: integer >= 2; upper bound of the range sampled from.
  #
  # Returns: vector of length 2; two different values in 1..max_value.
  picks <- sample(max_value, 2)
  picks
}
# Question 5
neutral_step <- function(community) {
  # Perform one neutral-model birth/death event (no speciation): a randomly
  # chosen individual is replaced by a copy of another randomly chosen one.
  #
  # Args:
  #   community: vector of positive integers; species identity per
  #     individual.
  #
  # Returns: the community with one individual replaced.
  idx <- choose_two(length(community))
  # The individual at idx[1] dies; offspring of idx[2] takes its place.
  community[idx[1]] <- community[idx[2]]
  community
}
# Question 6
neutral_generation <- function(community) {
  # Advance the community by one generation of the neutral model (no
  # speciation), i.e. length(community)/2 individual birth/death steps.
  #
  # Args:
  #   community: vector of positive integers; species identity per
  #     individual.
  #
  # Returns: the community after one generation of neutral steps.
  num_steps <- length(community) / 2
  # For odd-sized communities, round the half-generation count up or down
  # at random (consumes one RNG draw, matching the original behaviour).
  if (num_steps %% 1 == .5) {
    num_steps <- sample(c(ceiling, floor), 1)[[1]](num_steps)
  }
  # BUG FIX: seq_len() instead of 1:num_steps, which would iterate over
  # c(1, 0) and run two spurious steps if num_steps were ever 0.
  for (step in seq_len(num_steps)) {
    community <- neutral_step(community)
  }
  community
}
# Question 7
neutral_time_series <- function(community, duration) {
  # Run a neutral-model simulation (no speciation) and record the species
  # richness at every generation.
  #
  # Args:
  #   community: vector of positive integers; species identity per
  #     individual.
  #   duration: positive integer; number of generations to simulate.
  #
  # Returns: vector of length duration + 1; species richness at
  #   generation 0 through generation `duration`.
  # PERF FIX: preallocate the result instead of growing it with c() on
  # every iteration (avoids O(n^2) copying).
  rich_vect <- integer(duration + 1)
  rich_vect[1] <- species_richness(community)
  for (gen in seq_len(duration)) {
    community <- neutral_generation(community)
    rich_vect[gen + 1] <- species_richness(community)
  }
  rich_vect
}
# Question 8
question_8 <- function() {
  ### Plots a time-series graph of species richness over 200 generations
  ### simulated with the neutral theory model, starting with a community of 100
  ### individuals all of different species (no speciation)
  #
  # Arguments: -
  #
  # Returns: character string; statement answering "What state will the system always converge to if you wait long enough, and why?"
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Generate the data
  generations_vect <- 0:200
  species_richness_vect <- neutral_time_series(1:100, 200)
  ### Plots species_richness_vect against generations_vect
  plot(generations_vect, species_richness_vect,
       # labels axes and graph as a whole
       xlab="Generations Elapsed", ylab="Species Richness",
       main="Species Richness of a Community \nover 200 Generations",
       # adds line, removes datapoints, sets its colour and width
       col="mediumblue", type="l", lwd=3,
       # disable numerical axes labels, sets edge of axes to origin
       xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0, 100), xlim=c(-.5, 200))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, 200, by=20), las=1, labels=TRUE)
  axis(2, seq(0, 100, by=10), las=2, labels=TRUE)
  axis(1, seq(0, 200, by=4), lwd.ticks=.3, labels=FALSE)
  # minor y ticks now span the full 0-100 axis (previously stopped at 20,
  # inconsistent with the major ticks above and with question_12)
  axis(2, seq(0, 100, by=2), lwd.ticks=.3, labels=FALSE)
  ### Statement
  return("The system will always converge on a state of monodominance. Since no speciation can occur in this model, each step can only reduce or maintain species richness, hence species richness will decrease over time until it reaches the mininmum of 1.")
}
# Question 9
neutral_step_speciation <- function(community,speciation_rate) {
  ### One birth/death event of the neutral model with speciation: a randomly
  ### chosen individual is replaced either by a brand-new species (with
  ### probability speciation_rate) or by a copy of another random individual
  #
  # Arguments:
  #   community - vector of +ve integers; species identity of each individual
  #   speciation_rate - numeric 0 to 1; probability the replacement is a new species
  #
  # Returns: vector of +ve integers; community with one individual replaced
  #
  ########
  ### Randomly pick the indices of two distinct individuals
  idx <- choose_two(length(community))
  ### With probability speciation_rate the replacement is a new species number
  ### (max + 1; a previously extinct number may be reused, which does not
  ### affect species richness), otherwise it copies the second individual
  if (runif(1) < speciation_rate) {
    replacement <- max(community) + 1
  } else {
    replacement <- community[idx[2]]
  }
  community[idx[1]] <- replacement
  return(community)
}
# Question 10
neutral_generation_speciation <- function(community,speciation_rate) {
  ### Simulates one generation's worth of neutral steps with speciation. A
  ### generation is length(community)/2 steps; for odd sizes the step count is
  ### rounded up or down at random.
  #
  # Arguments:
  #   community - vector of +ve integers; species identity of each individual
  #   speciation_rate - numeric 0 to 1; probability a replacement speciates
  #
  # Returns: vector of +ve integers; community after one generation of
  #   neutral_step_speciation replacements
  #
  ########
  ### One generation is half as many steps as there are individuals
  half <- length(community) / 2
  if (half %% 1 == 0.5) {
    # odd community size: round the step count up or down with equal chance
    num_steps <- sample(c(ceiling(half), floor(half)), 1)
  } else {
    num_steps <- half
  }
  ### seq_len() yields an empty loop when num_steps is 0; the previous
  ### 1:num_steps form would wrongly iterate over c(1, 0) in that case
  for (step in seq_len(num_steps)) {
    community <- neutral_step_speciation(community, speciation_rate)
  }
  return(community)
}
# Question 11
neutral_time_series_speciation <- function(community,speciation_rate,duration) {
  ### Simulates "duration" generations of the community under the neutral model
  ### with speciation and records the species richness at every generation
  #
  # Arguments:
  #   community - vector of +ve integers; species identity of each individual
  #   speciation_rate - numeric 0 to 1; probability a replacement speciates
  #   duration - +ve integer; number of generations to simulate
  #
  # Returns: numeric vector; species richness from gen(0) to gen(duration);
  #   length is duration + 1
  #
  ########
  ### Preallocate the time series instead of growing it with c() in the loop
  rich_vect <- numeric(duration + 1)
  rich_vect[1] <- species_richness(community)
  ### Simulate each generation; seq_len() is safe when duration is 0 (1:0 is not)
  for (gen in seq_len(duration)){
    community <- neutral_generation_speciation(community, speciation_rate)
    rich_vect[gen + 1] <- species_richness(community)
  }
  return(rich_vect)
}
# Question 12
question_12 <- function() {
### Plots a time-series graph of species richness over 200 generations simulated with the neutral theory model, starting with a community of 100 individuals all of different species (blue) and all the same species (red), with speciation as a possibility
# NOTE(review): the minimum-richness line is actually drawn in maroon (see the
# lines() call below), not red as stated above -- confirm intended wording
#
# Arguments: -
#
# Returns: character string; statement explaining the effect intitial conditions had, and answering "Why does the neutral model give these particular results?"
#
########
### Clear all graphics
graphics.off()
### Generate the data; one richness vector with each max and min richness
# gen(0) to gen(200) inclusive, matching the length of the richness vectors
generations_vect <- 0:200
# maximum initial richness: all 100 individuals of distinct species (1..100)
max_species_richness <- neutral_time_series_speciation(1:100, 0.1, 200)
# minimum initial richness: all 100 individuals of the same species
min_species_richness <- neutral_time_series_speciation(rep(1,100), 0.1, 200)
### Plots max_species_richness against generations_vect
plot(generations_vect, max_species_richness,
# labels axes and graph as a whole
xlab="Generations Elapsed", ylab="Species Richness",
main="Species Richness of Maximally and Minimally Diverse Communities \n with a Speciation Rate of 0.1 over 200 Generations",
# adds line, removes datapoints, sets its colour and width
col="mediumblue", type="l", lwd=3,
# disable numerical axes labels, sets edge of axes to origin
xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0, 100), xlim=c(-.5, 200))
# plots min_species_richness against generations_vect, formats line
lines(generations_vect, min_species_richness, col="maroon", lwd=3)
# adds legend for the lines in topright corner
legend("topright", col=c("mediumblue","maroon"), lwd=3, cex=0.8,
legend=c("Maximum Initial Richness","Minimum Initial Richness"))
# adds custom numerical axes labels, and minor axis ticks
axis(1, seq(0, 200, by=20), las=1, labels=TRUE)
axis(2, seq(0, 100, by=10), las=2, labels=TRUE)
axis(1, seq(0, 200, by=4), lwd.ticks=.3, labels=FALSE)
axis(2, seq(0, 100, by=2), lwd.ticks=.3, labels=FALSE)
### Statement
return("Speciation introduces a force to increase species richness, where before there was only a force to decrease it, extinction. The community of maximum species richness has nowhere to go but a species richness decrease, and vice versa. They will hence meet in the middle, where these two forces reach an equilibrium. With such a small sample, random variation introduces significant fluctuations even at equilibrium, but as population size or number ov averaged simulations increases, these lines will converge as generations pass.")
}
# Question 13
species_abundance <- function(community) {
  ### Computes the abundance of each species in the community, ordered from
  ### most to least abundant
  #
  # Arguments:
  #   community - vector of +ve integers; species identity of each individual
  #
  # Returns: vector of +ve integers; per-species counts in descending order
  #
  ########
  ### Tally individuals per species, order the counts, strip the species names
  counts <- table(community)
  ordered_counts <- sort(counts, decreasing = TRUE)
  return(as.vector(ordered_counts))
}
# Question 14
octaves <- function(abundance_vector) {
  ### Bins species abundances into octave classes: class 1 holds abundance 1,
  ### class 2 holds 2-3, class 3 holds 4-7, and so on (doubling bins)
  #
  # Arguments:
  #   abundance_vector - vector of +ve integers; species abundances
  #
  # Returns: vector of integers 0 to Inf; frequency of each octave class in
  #   ascending order of class size
  #
  ########
  ### Octave class of abundance n is floor(log2(n) + 1); tabulate counts them
  class_index <- floor(log2(abundance_vector) + 1)
  return(tabulate(class_index))
}
# Question 15
sum_vect <- function(x, y) {
  ### Adds two vectors elementwise after padding the shorter one with trailing
  ### zeros so the lengths match
  #
  # Arguments:
  #   x, y - vectors of integers 0 to Inf; vectors to be summed
  #
  # Returns: vector of integers 0 to Inf; elementwise sum, length max(|x|, |y|)
  #
  ########
  ### Pad each vector with zeros up to the longer length, then add
  target_len <- max(length(x), length(y))
  x_padded <- c(x, rep(0, target_len - length(x)))
  y_padded <- c(y, rep(0, target_len - length(y)))
  return(x_padded + y_padded)
}
# Question 16
question_16 <- function() {
### Plots a bargraph of mean octave class frquencies, representning mean species abundance distribution, from 100 evenly-spaced samples of 2000 post-burn-off period generations of neutral model simulations for a community of 100 individuals.
# NOTE(review): the mean below actually averages 101 octave vectors (the
# burn-in-end sample plus 100 interval samples) -- confirm whether the
# burn-in-end sample is meant to be included
#
# Arguments: -
#
# Returns: character string; statement answering "Does the initial condition of the system matter, and why/why not?"
#
########
### Clear all graphics
graphics.off()
### Generate the community of 100 individuals with maximum species richness
# init_community_max is defined elsewhere in this file
community_max <- init_community_max(100)
### Attain state of community after 200-gen burn-in period, and the octave
for (gen in 1:200){
community_max <- neutral_generation_speciation(community_max, 0.1)}
# running octave total starts from the burn-in-end state; count tracks samples
oct_total <- octaves(species_abundance(community_max)); oct_count <- 1
### Run simulation 2000 more times, take a sample every 20 generations, add to total, and ultimately didvide by number of octaves total for the mean
for (gen in 1:2000){
community_max <- neutral_generation_speciation(community_max, 0.1)
if (gen %% 20 == 0){
# sum_vect pads with zeros so octave vectors of unequal length can be summed
oct_total <- sum_vect(oct_total,octaves(species_abundance(community_max)))
oct_count <- oct_count + 1 }}
### Finds the mean octave frequency, the highest y value rounded up, and the abundance ranges corresponding with each octave class, all for use in plotting
mean_oct <- oct_total/oct_count
ymax <- ceiling(max(mean_oct))
# first octave class is abundance 1; subsequent classes get "2^k - 2^(k+1)-1" labels
x_vals <- 1:length(mean_oct)
x_vals[-1] <- paste0(2^(x_vals[-1]-1), "-", 2^x_vals[-1]-1);
### Creates barplot of the mean species abundance distriubtion, as octaves, for the equilibrium period
barplot(mean_oct, names.arg=x_vals,
# labels axes and graph as a whole
xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
main="Estimated Mean Species Abundance Distribution as Octaves \n over 2000 Generations",
# disable numerical axes labels, sets edge of axes to origin, sets limit
yaxt ="n", yaxs="i", ylim=c(0, ymax))
# adds custom numerical x axis labels and minor ticks
axis(2, seq(0, ymax, by=1), las=1, labels=TRUE)
axis(2, seq(0, ymax, by=0.2), lwd.ticks=.3, labels=FALSE)
return("The initial condition of the system does not matter using these parameters; as explained in question 12, 200 generations (the burn-off period used here) is more than enough time for equilibrium to be reached from the two extremes of species richness. Hence, at the start of the subsequent 2000 generation simulation, the expected start-point regardless of initial conditions is expected to be the same, differing only due to random fluctuations owing to the small sample size.")
}
# Question 17
cluster_run <- function(speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations, output_file_name) {
### runs the neutral_generation_speciation function repeatedly, periodically analysing the results, all in accordance with the parameters, saving all results to an argued rda file
#
# Arguments:
# speciation_rate - numeric 0-1; determines frequency of speciation
# size - +ve integer; determines population size of communies
# wall_time - +ve integer; determines how long each simulation runs, in minutes
# interval_rich - +ve integer; determines the generation interval at which species richness is taken during burn-in period
# interval_oct - +ve integer; determines the geberation interval at which octaves of species abundance are taken throughout simulations
# burn_in_generations - +ve integer; number of generations before community expected to be comfortably in equilibrium
# output_file_name - char string; name of output rda file
#
# Returns: -
#
########
### Generates community of specified size and of minimum diversity
# init_community_min is defined elsewhere in this file
community <- init_community_min(size)
### Starts clock; creates generation count, and empty data structures to fill
# proc.time()[3] is elapsed wall-clock time in seconds
ptm <- proc.time(); nGens <- 1; richness_vect <- NULL; oct_vect <- list()
### Simulates generatrions until 60*wall_time seconds have elapsed
# NOTE: generation count is unbounded; loop duration depends only on wall time
while(proc.time()[3] - ptm[3] < 60*wall_time){
community <- neutral_generation_speciation(community, speciation_rate)
# saves species richness at given interval during given burn in period
# (richness is deliberately NOT recorded after burn_in_generations)
# NOTE(review): scalar condition uses `&`; `&&` would be the idiomatic choice
if (nGens %% interval_rich == 0 & nGens <= burn_in_generations){
richness_vect <- c(richness_vect, species_richness(community)) }
# saves octaves of species_abundance of community at given interval
# (octaves are recorded for the whole run, including burn-in)
if (nGens %% interval_oct == 0){
oct_vect <- c(oct_vect, list(octaves(species_abundance(community)))) }
# adds to generation count
nGens <- nGens + 1 }
# saves finishing time for input to rda file
elapsed_time <- proc.time()[3] - ptm[3]
### Saves final versions of objects created in script, and time elapsed,
save(richness_vect, oct_vect, community, elapsed_time,
# alongside all original parameters, as a file as named in initial argument
speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations, file=output_file_name)
}
# Question 20
process_cluster_results <- function(iter_num=100, popsizes=c(500, 1000, 2500, 5000)) {
### Analyses rda files generated from run_cluster.sh, finding the mean octave of species abundancies for each size of community and saving them to a new rda file
# NOTE(review): assumes iter_num is an exact multiple of length(popsizes), and
# that the output files are grouped in contiguous, equally sized blocks per
# population size -- confirm against how run_cluster.sh names its iterations
#
# Arguments:
# iter_num - +ve integer; determines number of rda files to be analysed, same as number of iterations run in run_cluster.sh
# popsizes - vector or integers; gives the popsizes to do this analysis on, assuming each have the same number of iterations
#
# Returns: list of vectors; one vector of average species abundancy octave classes of all simulations of the same population size for each population size
#
########
### Creates empty list and populates it with lists of rda files' objects
combined_results <- list()
for (i in 1:iter_num){
# loads each data file based on iter number
fname <- paste0("../../data/output_files/output_file_", i, ".rda")
load(file=fname)
# creates list of objects from data file, adds to list of lists
# (order here fixes the numeric indices used below: [[2]]=oct_vect,
# [[9]]=interval_oct, [[10]]=burn_in_generations)
obj_list <- list(richness_vect, oct_vect, community, elapsed_time, speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations)
combined_results[[length(combined_results)+1]] <- obj_list }
### Finds number of simulations per size, and creates blank list for ouput
sims_per_size <- iter_num / length(popsizes)
mean_oct_ls <- list()
### Finds information for each set of popsizes
for (popsize in 0:(length(popsizes)-1)) {
# corresponding first (by index) simulation for that popsize
size_start <- popsize * sims_per_size
# finds burn-in period, oct interval, & hence number of octaves to remove
# NOTE(review): assumes burn_in is an exact multiple of oct_int -- confirm
burn_in <- combined_results[[size_start + 1]][[10]]
oct_int <- combined_results[[size_start + 1]][[9]]
burn_in_octs <- burn_in/oct_int
### Creates list for all post-burn in octaves for all simulations of a given population size, and populates it
multisim_oct_vect <- list()
for (sim in 1:sims_per_size) {
# for each simulation...
iter <- size_start + sim
# extracts oct_vect and removes octaves taken during burn-in
oct_vect <- combined_results[[iter]][[2]]
postBI_oct_vect <- oct_vect[(burn_in_octs+1):length(oct_vect)]
# appends those left to the master list
multisim_oct_vect <- c(multisim_oct_vect, postBI_oct_vect) }
### Makes all octaves in each master list the same length with 0s
max_length <- max(sapply(multisim_oct_vect, length))
multisim_oct_vect <- lapply(multisim_oct_vect, function(x) c(x, rep(0, max_length - length(x))))
### Converts list of vectors to dataframe, calculates mean of each column
oct_df <- as.data.frame(do.call(rbind, multisim_oct_vect))
mean_oct <- sapply(1:max_length, function(x) mean(oct_df[, x]))
### Appends vecotor of means of each popsize to list of all of them
mean_oct_ls[[length(mean_oct_ls) + 1]] <- mean_oct }
### Saves as .rda file (not actually used) and returns list of lists
save(mean_oct_ls, file = "mean_oct_ls.rda")
return(mean_oct_ls)
}
plot_cluster_results <- function() {
  ### Plots the mean octave classes for each popsize as barcharts, from the
  ### results of cluster_run (via process_cluster_results), and returns the
  ### underlying data
  #
  # Arguments: -
  #
  # Returns: list of vectors; one vector of mean species abundance octave
  #   classes per population size (the data that was plotted)
  #
  ########
  ### Clear all graphics; set number of iterations and popsizes to plot & analyse
  graphics.off()
  iter_num <- 100
  popsizes <- c(500, 1000, 2500, 5000)
  ### Extract mean octaves per population size from the rda files' results
  mean_oct_ls <- process_cluster_results(iter_num, popsizes)
  ### Set structure of multiplot (tolerates any number of iters and popsizes)
  dev.new(width = 16.4, height = 10)
  par(mfcol = c(ceiling(length(popsizes)/2), 2))
  ### One shade of grey per population size; computed once, outside the loop
  ### (previously recomputed every iteration, and a stray debug print(col)
  ### dumped the base col() function / stale loop variable to the console)
  pal <- colorRampPalette(c("lightgrey", "grey20"))(length(popsizes))
  for (i in seq_along(popsizes)){
    # gets the mean octave vector, and the plot's y axis limit and interval
    mean_oct <- mean_oct_ls[[i]]
    ymax <- ceiling(max(mean_oct))
    yint <- ceiling(ymax/5)/2
    # creates x axis labels of species abundance ranges corresponding to octaves
    x_vals <- 1:length(mean_oct)
    x_vals[-1] <- paste0(2 ^ (x_vals[-1] - 1), "-", 2 ^ x_vals[-1] - 1)
    # calculates and sets the row and column for this plot in the multiplot
    row <- ceiling(i/2); col <- ifelse(i%%2!=0, 1, 2)
    par(mfg = c(row, col))
    ### Plots barplot of means of octave class values
    barplot(mean_oct, names.arg=x_vals, cex.names=.8,
            # labels axes and graph as a whole
            xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
            main=paste("Mean Species Abundance Distribution as Octaves \n for Communities of", popsizes[i], "individuals"),
            # disable numerical axis labels, set edge of axes to origin and limit
            yaxt="n", yaxs="i", ylim=c(0, ymax), col=pal[i])
    # adds custom numerical y axis labels and minor ticks
    axis(2, seq(0, ymax, by=yint), cex.axis=0.8, las=1, labels=TRUE)
    axis(2, seq(0, ymax, by=yint/5), lwd.ticks=.3, labels=FALSE)
  }
  ### Returns the data plotted within the function
  return(mean_oct_ls)
}
# Question 21
question_21 <- function() {
  ### Returns the fractal dimension of the Q21 object plus a verbal justification
  #
  # Returns: list; [[1]] numeric fractal dimension, [[2]] character explanation
  #
  ########
  # Dimension D solves 3^D = 8, so D = log(8)/log(3)
  dimension <- log(8) / log(3)
  explanation <- "8 units of the constiuent, smaller object are needed to increase the width & height of the larger by 3 times: hence, where the number of dimensions is given by x, 3^x = 8, and x = log(8)/log(3)"
  return(list(dimension, explanation))
}
# Question 22
question_22 <- function() {
  ### Returns the fractal dimension of the Q22 object plus a verbal justification
  #
  # Returns: list; [[1]] numeric fractal dimension, [[2]] character explanation
  #
  ########
  # Dimension D solves 3^D = 20, so D = log(20)/log(3)
  dimension <- log(20) / log(3)
  explanation <- "20 units of the constiuent, smaller object are needed to increase the width, height and depth of the larger by 3 times: hence, where the number of dimensions is given by x, 3^x = 20, and x = log(20)/log(3)"
  return(list(dimension, explanation))
}
# Question 23
chaos_game <- function(A=c(0,0), B=c(3,4), C=c(4,1), X=c(0,0), points=NULL, dist=.5, reps=100000, plot=TRUE){
  ### Generates the coordinates of a "chaos game" fractal attractor and either
  ### plots it (default) or returns the coordinate matrix
  #
  # Arguments:
  #   A, B, C - coordinate vectors; vertices the shape forms between
  #   X - coordinate vector; starting point of the walk
  #   points - matrix of coordinate vectors (ncol == 2, one vertex per row); if
  #     supplied, replaces the matrix built from A, B and C, allowing > 3 vertices
  #   dist - +ve numeric; fraction applied when moving towards the sampled vertex
  #     (0.5 gives the classic midpoint rule)
  #   reps - +ve integer; number of points generated; increases resolution
  #   plot - logical; TRUE plots the shape and returns an explanatory string,
  #     FALSE returns the coordinate matrix instead
  #
  # Returns:
  #   plot=TRUE  - character string; statement describing the generated shape
  #   plot=FALSE - reps x 2 matrix; all coordinates of the walk
  #
  ########
  ### Clear all graphics; fix the RNG seed so the output is reproducible
  graphics.off(); set.seed(2)
  ### Build the vertex matrix from A, B and C unless one was supplied directly
  if(is.null(points)){ points <- matrix(c(A, B, C), byrow=TRUE, ncol=2) }
  ### Preallocate the walk; every row starts as X
  coords <- matrix(rep(X, reps), byrow=TRUE, ncol=2)
  ### Pre-sample one random vertex per step; drop=FALSE keeps rpoints a matrix
  ### even when reps == 1
  rnums <- sample(nrow(points), reps, replace=TRUE)
  rpoints <- points[rnums, , drop=FALSE]
  ### Each point moves a fraction "dist" of the way from the previous point
  ### towards its sampled vertex. seq_len(reps)[-1] is empty when reps == 1;
  ### the previous 2:reps form would iterate c(2, 1) and index out of bounds.
  for (rep in seq_len(reps)[-1]){
    coords[rep,] <- (coords[rep-1,] + rpoints[rep-1,])*dist }
  ### If plot is FALSE, return the raw coordinate matrix...
  if (plot == FALSE){ return(coords)
  ### ...else plot the shape without axes and return an explanatory statement
  }else{ plot(coords, cex=.001, pch=20, axes=FALSE, ann=FALSE)
  return("This code generates a fractal shape with dimension of log(3)/log(2), equivalent to the shape in Q21 but with triangles rather than squares. The plane it's on appears not to be flat on the screen because the points are not equally spaced. In fact, a 2D equalateral triangle with integer coordinates cannot exist.") }
}
# Question 24
turtle <- function(start_position, direction, length, grad=FALSE, lwd=1) {
  ### Draws a single line segment from start_position and returns its end point
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate for the line segment
  #   direction - radian; angle of the line segment from the start position
  #   length - +ve numeric; length of the line from the start position
  #   grad - logical; if TRUE, colours the line from brown (longer segments) to
  #     light green (shorter segments) based on length
  #   lwd - +ve numeric; line width
  #
  # Returns: coordinates vector; end point of the drawn segment
  #
  ########
  ### End point offsets: x is length*sin(pi/2 - direction) (= cos(direction)),
  ### y is length*sin(direction)
  end_position <- start_position + length * sin(c(pi/2-direction, direction))
  ### Choose the colour with a plain if/else on the scalar condition, rather
  ### than an ifelse() whose branches assign as a side effect
  if (grad) {
    # NOTE(review): the palette index assumes roughly 0.001 < length < 0.125;
    # lengths outside that range index out of the 9-colour ramp -- confirm
    # the ranges callers pass in
    col <- colorRampPalette(c("brown", "darkgreen", "lightgreen"))(9)[ceiling(-log2(length))-2]
  } else {
    col <- "black"
  }
  ### Draw the segment between start and end positions, and return the latter
  lines(rbind(start_position, end_position), col=col, lwd=lwd)
  return(end_position)
}
# Question 25
elbow <- function(start_position, direction, length) {
  ### Draws two joined line segments: one from start_position, then a second,
  ### slightly shorter one bent pi/4 clockwise from the first segment's end
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate of the first segment
  #   direction - radian; angle of the first segment from the start position
  #   length - +ve numeric; length of the first segment
  #
  # Returns: -
  #
  ########
  ### First segment; its end point is the joint for the second
  joint <- turtle(start_position, direction, length)
  ### Second segment: pi/4 clockwise turn, 5% shorter; assign the return value
  ### so it is not auto-printed
  junk <- turtle(joint, direction-pi/4, length*0.95)
}
# Question 26
spiral <- function(start_position, direction, length) {
  ### Draws a spiral by calling turtle, then recursing with a pi/4 clockwise
  ### turn and 5% shorter segment until the segment length drops below 0.001
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate of the segment
  #   direction - radian; angle of the segment from the start position
  #   length - +ve numeric; segment length
  #
  # Returns: character string; explains the problem encountered when no length
  #   cut-off is applied to the recursion
  #
  ########
  ### Draw one segment; its end point seeds the next recursive call
  next_start <- turtle(start_position, direction, length)
  ### Recurse until the segments become negligibly short
  if (length > 0.001){
    spiral(next_start, direction-pi/4, length*0.95)
  }
  return("The function is calling itself recursively, stuck in an infinite loop (or spiral, if you will), so the computer rapidly reaches the default memory limit set by R and returns an error.")
}
# Question 27
draw_spiral <- function() {
  ### Visualises the spiral function with fixed parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics and open a fresh, blank plotting frame
  graphics.off()
  frame()
  ### Draw the spiral starting at (0.3, 0.7), heading right, segment length 0.3
  spiral(c(.3, .7), 0, .3)
}
# Question 28
tree <- function(start_position=c(.5,0), direction=pi/2, length=.2, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.65,0.65), adj_ang=pi/4) {
  ### Recursively draws a binary tree: each segment spawns two shorter branches
  ### turned +/- adj_ang, until segment length drops below e
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate of the segment
  #   direction - radian; angle of the segment from the start position
  #   length - +ve numeric; segment length
  #   e - +ve numeric (< length); recursion stops once length falls below e
  #   grad - logical; if TRUE, colours segments brown to light green by length
  #   lwd - +ve numeric; line width
  #   adj_len - numeric vector; length multipliers for the two child branches
  #   adj_ang - radian; angle offset of each child branch from its parent
  #
  # Returns: -
  #
  ########
  ### Draw the current segment; its end point is where both children sprout
  branch_point <- turtle(start_position, direction, length, grad, lwd)
  ### Stop branching once segments shrink below the threshold e
  if (length > e){
    # anticlockwise child branch
    tree(branch_point, direction+adj_ang, length*adj_len[1], e, grad, lwd, adj_len, adj_ang)
    # clockwise child branch
    tree(branch_point, direction-adj_ang, length*adj_len[2], e, grad, lwd, adj_len, adj_ang)
  }
}
draw_tree <- function() {
  ### Visualises the tree function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics and open a fresh, blank plotting frame
  graphics.off()
  frame()
  ### Draw the tree with all defaults
  tree()
}
# Question 29
fern <- function(start_position=c(.5,0), direction=pi/2, length=.1, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.38,0.87), adj_ang=pi/4) {
  ### Recursively draws a fern: each segment spawns one strongly-shortened side
  ### branch turned by adj_ang and one mildly-shortened continuation of the
  ### main axis, until segment length drops below e
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate of the segment
  #   direction - radian; angle of the segment from the start position
  #   length - +ve numeric; segment length
  #   e - +ve numeric (< length); recursion stops once length falls below e
  #   grad - logical; if TRUE, colours segments brown to light green by length
  #   lwd - +ve numeric; line width
  #   adj_len - numeric vector; length multipliers for side and main branches
  #   adj_ang - radian; angle offset of the side branch
  #
  # Returns: -
  #
  ########
  ### Draw the current segment; its end point is where both branches sprout
  branch_point <- turtle(start_position, direction, length, grad, lwd)
  ### Stop branching once segments shrink below the threshold e
  if (length > e){
    # side branch: turned by adj_ang, strongly shortened
    fern(branch_point, direction+adj_ang, length*adj_len[1], e, grad, lwd, adj_len, adj_ang)
    # main branch: same direction, mildly shortened
    fern(branch_point, direction, length*adj_len[2], e, grad, lwd, adj_len, adj_ang)
  }
}
draw_fern <- function() {
  ### Visualises the fern function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics and open a fresh, blank plotting frame
  graphics.off()
  frame()
  ### Draw the fern with all defaults
  fern()
}
# Question 30
fern2 <- function(start_position=c(.5,0), direction=pi/2, length=.1, dir=1, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.38,0.87), adj_ang=pi/4) {
  ### Recursively draws a fern whose side branches alternate sides: each
  ### segment spawns a side branch turned dir*adj_ang and a main-axis
  ### continuation with dir flipped, until segment length drops below e
  #
  # Arguments:
  #   start_position - coordinates vector; first coordinate of the segment
  #   direction - radian; angle of the segment from the start position
  #   length - +ve numeric; segment length
  #   dir - 1 or -1; side on which the next side branch sprouts
  #   e - +ve numeric (< length); recursion stops once length falls below e
  #   grad - logical; if TRUE, colours segments brown to light green by length
  #   lwd - +ve numeric; line width
  #   adj_len - numeric vector; length multipliers for side and main branches
  #   adj_ang - radian; angle offset of the side branch
  #
  # Returns: -
  #
  ########
  ### Draw the current segment; its end point is where both branches sprout
  branch_point <- turtle(start_position, direction, length, grad, lwd)
  ### Stop branching once segments shrink below the threshold e
  if (length > e){
    # side branch on the current side, strongly shortened
    fern2(branch_point, direction+dir*adj_ang, length*adj_len[1], dir, e, grad, lwd, adj_len, adj_ang)
    # main branch: same direction, mildly shortened, side flipped for the next level
    fern2(branch_point, direction, length*adj_len[2], -dir, e, grad, lwd, adj_len, adj_ang)
  }
}
draw_fern2 <- function() {
  ### Visualises the fern2 function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics and open a fresh, blank plotting frame
  graphics.off()
  frame()
  ### Draw the alternating-branch fern with all defaults
  fern2()
}
################################################################################
############################### CHALLENGES #####################################
################################################################################
# Challenge question A
Challenge_A <- function() {
  ### Plots a time-series graph of mean species richness over 'nGens' generations simulated 'nSims' times with the neutral theory model, starting with a community of 'nPop' individuals all of different species (blue) and all the same species (red), plus an estimate of the generation at which equilibrium is reached
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Set parameters: number of simulations, generations & individuals, and confidence interval to be used in plotting
  nSims <- 500; nGens <- 80; nPop <- 100; CI <- 97.2
  ### One column per simulation: first nSims columns start maximally diverse (1:nPop), next nSims start minimally diverse (all species 1)
  sims <- matrix(c( rep(1:nPop, nSims), rep(rep(1,nPop), nSims)), nrow=nPop)
  ### Vectors of means and SDs of species richness for both community types; element 1 holds the generation-0 values
  mean_SRs_max <- nPop; sd_SRs_max <- sd(1:nPop)
  mean_SRs_min <- 1; sd_SRs_min <- 0.1 #should be 0 but avoids warning
  for (gen in 1:nGens){
    # advances every simulation by one generation (speciation rate 0.1)
    sims <- apply(sims, 2, neutral_generation_speciation, speciation_rate=.1)
    # species richness of each simulation after this generation
    SRs_vect <- sapply(1:ncol(sims), function(x) species_richness(sims[, x]))
    # appends this generation's means and SDs for the two community types
    mean_SRs_max <- c(mean_SRs_max, mean(SRs_vect[1:nSims]))
    mean_SRs_min <- c(mean_SRs_min, mean(SRs_vect[(nSims+1):(nSims*2)]))
    sd_SRs_max <- c(sd_SRs_max, sd(SRs_vect[1:nSims]))
    sd_SRs_min <- c(sd_SRs_min, sd(SRs_vect[(nSims + 1):(nSims * 2)])) }
  ### Uses input confidence interval and calculated SDs to find margins of error
  # NOTE(review): divides by sqrt(nPop); the standard error of a mean over nSims simulations would normally use sqrt(nSims) -- confirm which was intended
  alpha <- 1-CI/100
  marg_err_max <- abs(qnorm(alpha/2)*(sd_SRs_max/sqrt(nPop)))
  marg_err_min <- abs(qnorm(alpha/2)*(sd_SRs_min/sqrt(nPop)))
  ### Finds first generation where margins of error of the two communities overlap -- the equilibrium estimate. Defaults to NA so the plot still renders if no overlap occurs within nGens (previously equil_gen was left undefined, crashing abline())
  equil_gen <- NA
  for (gen in 1:nGens){
    if((mean_SRs_max[gen] - marg_err_max[gen]) -
       (mean_SRs_min[gen] + marg_err_min[gen]) < 0){ equil_gen <- gen; break; }}
  ### Plots the maximally-diverse community's means against generation number
  plot(0:nGens, mean_SRs_max,
       # labels axes and graph as a whole
       xlab="Generations Elapsed", ylab="Mean Species Richness",
       main=paste("Mean Species Richness of",nSims,"Simulations of \n Maximally and Minimally Diverse Communities with a \n Speciation Rate of 0.1 over",nGens,"Generations"),
       # adds line, removes datapoints, sets its colour and width
       col="mediumblue", type="l", lwd=3,
       # disable numerical axes labels, sets edge of axes to origin
       xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0,nPop), xlim=c(-.5,nGens))
  # plots the minimally-diverse community's means, formats line
  lines(0:nGens, mean_SRs_min, col="maroon", lwd=3)
  # adds error bars for each line based on the pre-calculated margins of error
  arrows(0:nGens, mean_SRs_max + marg_err_max,
         0:nGens, mean_SRs_max - marg_err_max, angle=90, code=3, length=0.02)
  arrows(0:nGens, mean_SRs_min + marg_err_min,
         0:nGens, mean_SRs_min - marg_err_min, angle=90, code=3, length=0.02)
  # adds line and explanatory text at the estimated equilibrium, if one was found
  if (!is.na(equil_gen)) {
    abline(v=equil_gen, lwd=2, col="darkgreen", lty="dashed")
    text(equil_gen, nPop/1.3, "Point of Equilibrium", pos=2, offset=0.5, srt=90)
  }
  # adds legend for the lines in topright corner
  legend("topright", col=c("mediumblue","maroon"), lwd=3, cex=0.8,
         legend=c("Maximum Initial Richness","Minimum Initial Richness"))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, nGens, by=10), las=1, labels=TRUE)
  axis(2, seq(0, nPop, by=10), las=2, labels=TRUE)
  axis(1, seq(0, nGens, by=2), lwd.ticks=.3, labels=FALSE)
  axis(2, seq(0, nPop, by=2), lwd.ticks=.3, labels=FALSE)
}
# Challenge question B
Challenge_B <- function() {
  ### Plots a time-series graph of mean species richness over 'nGens' generations simulated 'nSims' times with the neutral theory model, starting with 'nSCs' communities of 'nPop' individuals with varying degrees of initial species richness
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Set parameters: number of simulations, generations, starting conditions and individuals/community
  nSims <- 150; nGens <- 50; nSCs <- 11; nPop <- 100 #must be multiple of nSCs-1
  # Calculates species richness gap between starting conditions (except first)
  SR_interval <- nPop/(nSCs-1)
  ### Preallocates matrices: one column per simulation, and one means column per starting condition (row 1 = generation 0)
  sims <- matrix(0, nrow=nPop, ncol=nSCs*nSims)
  means <- matrix(0, nrow=nGens+1, ncol=nSCs)
  means[1,] <- c(1, seq(SR_interval, nPop, length=nSCs-1))
  ### Populates 1st & last nSims columns w/ communities of min & max species richness
  sims[,c(1:nSims)] <- rep(rep(1, nPop), nSims)
  sims[,((nSCs-1)*nSims+1):(nSCs*nSims)] <- rep(1:nPop, nSims)
  ### Populates middle columns with communities of intermediate species richness
  for (SC in 1:(nSCs-2)){
    for (col in 1:nSims){
      # multiple rounds of non-replacement sampling over the SC's species pool, until length(indivs) >= nPop
      indivs <- NULL; while (length(indivs) < nPop){
        indivs <- c(indivs, sample(1:(SC * SR_interval), SC * SR_interval)) }
      # input into appropriate column
      sims[,(SC * nSims + col)] <- indivs[1:nPop] } }
  ### Runs each simulation for 'nGens' generations, recording the mean species richness of every starting condition at each generation
  for (gen in 1:nGens){
    # runs neutral_generation_speciation on each column
    sims <- apply(sims, 2, neutral_generation_speciation, speciation_rate=.1)
    # make vector of species richness of each column
    SRs_vect <- sapply(1:ncol(sims), function(x) species_richness(sims[, x]))
    # mean richness across the nSims replicates of each starting condition
    for (SC in 1:nSCs){
      means[(1+gen), SC] <- mean(SRs_vect[(1+nSims*(SC-1)):(nSims*SC)]) } }
  ### Plots those means against the generations they were taken from
  # creates palette of colours for the different lines
  palette <- rainbow(nSCs)
  # plots the first line
  plot(0:nGens, means[,1],
       # labels axes and graph as a whole
       xlab="Generations Elapsed", ylab="Mean Species Richness",
       main=paste("Mean Species Richness of",nSims,"Simulations of \n Varyingly Diverse Communities with a \n Speciation Rate of 0.1 over",nGens,"Generations"),
       # adds line, removes datapoints, sets its colour and width
       col=palette[1], type="l", lwd=2,
       # disable numerical axes labels, sets edge of axes to origin
       xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0,nPop), xlim=c(-.5,nGens))
  # plots other means columns against generation number, formats line
  for (SC in 2:nSCs){
    lines(0:nGens, means[,SC], col=palette[SC], lwd=2) }
  # adds legend for the lines in topright corner
  legend("topright", col=palette[1:nSCs], lwd=3, cex=0.8, legend=paste(
    "Starting Richness of", c(1, seq(SR_interval, nPop, length=nSCs-1))))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, nGens, by=5), las=1, labels=TRUE)
  axis(2, seq(0, nPop, by=10), las=2, labels=TRUE)
  axis(1, seq(0, nGens, by=1), lwd.ticks=.3, labels=FALSE)
  axis(2, seq(0, nPop, by=2), lwd.ticks=.3, labels=FALSE)
}
# Challenge question C
Challenge_C <- function() {
  ### Analyses rda files generated from run_cluster.sh, finding the generation at which equilibrium was reached for each size of community
  #
  # Arguments: -
  #
  # Returns: character string; explanatory statement
  #
  ########
  ### Clear all graphics; sets num of iterations and popsizes to plot & analyse
  graphics.off()
  iter_num <- 100
  popsizes <- c(500, 1000, 2500, 5000)
  ### Creates empty list and populates it with lists of rda files' objects
  combined_results <- list()
  for (i in 1:iter_num){
    # loads each data file based on iter number; each file is expected to define richness_vect, interval_rich and burn_in_generations
    fname <- paste0("../../data/output_files/output_file_", i, ".rda")
    load(file=fname)
    # creates list of objects from data file, adds to list of lists
    obj_list <- list(richness_vect, interval_rich, burn_in_generations)
    combined_results[[length(combined_results)+1]] <- obj_list }
  ### Finds number of simulations per size, and creates blank list for output
  sims_per_size <- iter_num / length(popsizes)
  mean_SR_ls <- list()
  ### Finds information for each set of popsizes
  for (popsize in 0:(length(popsizes)-1)) {
    # corresponding first and last (by index) simulations for that popsize
    start <- sims_per_size * popsize + 1
    end <- sims_per_size * (popsize + 1)
    # finds burn-in period and richness interval from one run of the group
    # NOTE(review): reads run start+1 rather than start, and assumes all runs in a popsize group share the same burn-in and interval -- confirm
    burn_in <- combined_results[[start + 1]][[3]]
    richness_int <- combined_results[[start + 1]][[2]]
    # hence generates the vector of corresponding generation numbers
    gen_vect <- seq(richness_int, burn_in, by=richness_int)
    # creates dataframe of each size's richness_vect, and finds mean of each col
    popsize_SRs <- lapply(combined_results[start:end], `[[`, 1)
    SR_df <- as.data.frame(do.call(rbind, popsize_SRs))
    mean_SR <- sapply(1:ncol(SR_df), function(x) mean(SR_df[, x]))
    ### Appends vectors of (logged) species richness means and generations of each popsize to list of all of them for plotting
    mean_SR_ls[[length(mean_SR_ls) + 1]] <- list(log(mean_SR), log(gen_vect+1))}
  ### Plots mean vectors against generation vectors of each popsize
  # creates palette of colours for the different lines
  palette <- rainbow(length(popsizes))
  # NOTE(review): round_to is computed but never used below
  round_to <- signif(popsizes[length(popsizes)]/100, 1)
  # upper limits taken from the largest popsize's data - will have highest of each
  ymax <- ceiling(max(mean_SR_ls[[length(popsizes)]][[1]]))
  xmax <- ceiling(max(mean_SR_ls[[length(popsizes)]][[2]]))
  # lower limits taken from the smallest popsize's data - will have lowest of each
  ymin <- floor(min(mean_SR_ls[[1]][[1]]))
  xmin <- ceiling(min(mean_SR_ls[[1]][[2]]))
  # plots the first line
  plot(mean_SR_ls[[1]][[2]], mean_SR_ls[[1]][[1]],
       # labels axes and graph as a whole
       xlab="Log of Generations Elapsed", ylab="Log of Mean Species Richness",
       main=paste("Mean Species Richness of",sims_per_size,"Simulations of \n Varyingly-Sized Communities"),
       # adds line, removes datapoints, sets its colour and width
       col=palette[1], type="l", lwd=2,
       # disable numerical axes labels, sets edge of axes to origin, & sets limits assuming the largest popsize has the largest values
       yaxs="i", xaxs="i", ylim=c(ymin,ymax), xlim=c(xmin,xmax), yaxt="n", xaxt="n")
  # plots other means columns against generation number, formats line
  for (i in 2:length(popsizes)) {
    lines(mean_SR_ls[[i]][[2]], mean_SR_ls[[i]][[1]], col=palette[i], lwd=2) }
  # adds legend for the lines in bottomright corner
  legend("bottomright", col=palette[1:length(popsizes)], lwd=3, cex=0.8,
         legend=paste("Population Size of", popsizes))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(xmin, xmax, by=1), las=1, labels=TRUE)
  axis(2, seq(ymin, ymax, by=1), las=2, labels=TRUE)
  axis(1, seq(xmin, xmax, by=0.2), lwd.ticks=.3, labels=FALSE)
  axis(2, seq(ymin, ymax, by=0.1), lwd.ticks=.3, labels=FALSE)
  ### Estimates point at which equilibrium has been reached for each popsize: the first datapoint lower than the minimum of the 100 preceding ones
  # NOTE(review): richness_int here retains the value from the LAST popsize iteration; this is only correct if every popsize used the same interval -- confirm
  for (pop in 1:length(popsizes)){
    for (SR in 101:length(mean_SR_ls[[pop]][[1]])){
      if (mean_SR_ls[[pop]][[1]][SR] < min(mean_SR_ls[[pop]][[1]][(SR-100):(SR-1)])){
        print(paste("estimated generation of equilibrium for community of size", popsizes[pop], "is", (SR-50)*richness_int))
        break;} } }
  ### Return an explanatory statement
  return(paste("From the curves we can see all communities have equilibrated by aproximately e^7.25, or", signif(exp(7.25), 3), "generations from the start. We can estimate more precisely for each community by finding where the data levels out, eg finding the first datapoint lower than a trailing average of the 50 previous datapoints as above. From these results, we gather that 2*popsize is a sufficient burn-in period here; 8*popsize is unnecessarily large, albeit not a significant issue."))
}
# Challenge question D
Challenge_D <- function() {
  ### Plots a bargraph of octave class frequencies, representing the species abundance distribution from the coalescence model for a community of 100 individuals.
  #
  # Arguments: -
  #
  # Returns: character string; statement answering "How many CPU hours were used on the coalescence simulation and how many on the cluster do an equivalent set of simulations (take?)? Why were the coalescence simulations so much faster?"
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Assigns community size, speciation rate, and sim number variables
  J <- 100; v <- 0.1; nSims <- 1000
  ### Calculates theta (fundamental biodiversity number) and creates a vector for aggregating octaves of each sim
  theta <- v * (J - 1) / (1 - v)
  oct_total <- c(0,0)
  for (i in 1:nSims){
    N <- J
    ### Creates lineages (one entry per individual, each of abundance 1) and an empty abundances vector to collect completed species
    lineages <- rep(1, J)
    abundances <- vector()
    ### While multiple lineages exist, either speciation (finalise a lineage's abundance) or coalescence (merge two lineages) occurs
    while (N > 1){
      lin_i <- sample(length(lineages), 2)
      if (runif(1) < theta/(theta+N-1)){
        # speciation: the first chosen lineage's abundance is recorded as a species
        abundances <- c(abundances, lineages[lin_i[1]])
      }else{
        # coalescence: the first lineage's abundance is absorbed into the second
        lineages[lin_i[2]] <- lineages[lin_i[1]] + lineages[lin_i[2]] }
      ### Either way the first chosen lineage is removed each loop
      lineages <- lineages[-lin_i[1]]
      N <- N - 1 }
    ### Final lineage added to abundances; octaves (defined elsewhere in this file) aggregated via sum_vect
    abundances <- c(abundances, lineages)
    oct_total <- sum_vect(oct_total, octaves(abundances))}
  ### Finds the mean octave frequency, the highest y value rounded up, and the abundance ranges corresponding with each octave class, all for use in plotting
  mean_oct <- oct_total/nSims
  ymax <- ceiling(max(mean_oct))
  x_vals <- 1:length(mean_oct)
  x_vals[-1] <- paste0(2^(x_vals[-1]-1), "-", 2^x_vals[-1]-1)
  ### Creates barplot of the mean species abundance distribution, as octaves
  barplot(mean_oct, names.arg=x_vals,
          # labels axes and graph as a whole
          xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
          main="Estimated Species Abundance Distribution as Octaves",
          # disable numerical y-axis labels, sets edge of axes to origin, sets limit
          yaxt ="n", yaxs="i", ylim=c(0, ymax))
  # adds custom numerical y-axis labels and minor ticks
  axis(2, seq(0, ymax, by=1), las=1, labels=TRUE)
  axis(2, seq(0, ymax, by=0.2), lwd.ticks=.3, labels=FALSE)
  return("I used a cluster time of 11 hours, whereas this coalescence simulation is near-instantaneous for the same parameters; however, I'd argue they are not equivalent (and in fact, as fundamentally different methods, cannot be equivalent). The simulations run on the cluster and question 16 are averaged over a vast period of time, wheareas this simulation has but one output. As such, there is much higher fluctuation in the results of this coalescence simulation.
Running this coalesence simulation many times and taking an average, as I did, leads to a much more stable result, and makes it about 10x faster than question 16 executing the same number of simulations.
Using this technique, we don't simulate or track a species that will go extinct, since we're effectively starting from the end and tracing back to the speciation events, hence there is no wasted processing time on that. There is also no wasted (assuming we're not interested in it) burn-in period, and we're generating less data to analyse and calling fewer functions.")
}
# Challenge question E
Challenge_E <- function(){
  ### Generates and plots a series of fractal shapes based off different parameters of the chaos_game function (defined elsewhere in this file), and returns an explanatory statement.
  #
  # Arguments: -
  #
  # Returns: character string; statement describing and explaining the generated shapes
  #
  ########
  ### Clear all graphics; creates gradiated palette from black through blue and red to orange
  graphics.off()
  pal <- colorRampPalette(c("black", "blue", "red", "orange"))(20)
  ### Create and manipulate vectors of coordinates for use
  equi_pts <- c(c(0,0), c(10,0), c(5,5*sqrt(3))) #equilateral triangle
  mid_pt <- c(5, 5 / 3 * sqrt(3)) #equilateral triangle center
  half_mid_pts <- (equi_pts+mid_pt)/2 #halfway between vertices and midpoint
  mid_equi_pts <- (equi_pts+c(equi_pts[5:6], equi_pts[1:4]))/2 #halfway points between vertices
  ### Creates list of point matrices using the chaos_game() function, for later plotting
  matricies <- list(
    ### 3 versions of Sierpinski Gasket with incremental colouring of points & different starting points. Lower reps number needed to stop overwriting
    chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(0,0), reps=5000), #startpoint = bottom left vertex
    chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(mid_pt), reps=5000), #startpoint = middle of equilateral triangle
    chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(10,5*sqrt(3)), rep=5000), #startpoint = x coord of right vertex and y coord of top
    ### Sierpinski Gasket - dist 1/2 only as explained in text
    chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F),
    ### Like Sierpinski Gasket, but with points half way between each pair of vertices; identical but only when on 1/3 rather than 1/2
    chaos_game(points=matrix(c(mid_equi_pts, equi_pts), byrow=T, ncol=2),
               dist=1/3, plot=F),
    # As above but with distance of 3/7
    chaos_game(points=matrix(c(mid_equi_pts, equi_pts), byrow=T, ncol=2),
               dist=3/7, plot=F),
    ### Like Sierpinski Gasket but with extra point at center of triangle
    chaos_game(points=matrix(c(equi_pts, mid_pt), byrow=T, ncol=2),
               dist=1/2, plot=F),
    # As above but with distance of 3/7
    chaos_game(points=matrix(c(equi_pts, mid_pt), byrow=T, ncol=2),
               dist=3/7, plot=F),
    ### Equilateral with points halfway between midpoint and each vertex
    chaos_game(points=matrix(c(half_mid_pts, equi_pts), byrow=T, ncol=2),
               dist=4/11, plot=F),
    ### No equilateral triangle points; only points halfway between midpoint and each vertex AND points half way between each pair of vertices
    chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts), byrow=T, ncol=2),
               X=mid_pt,dist=1/3, plot=F),
    # As above but with extra point at center
    chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts, mid_pt), byrow=T, ncol=2), X=mid_pt, dist=1/3, plot=F),
    # As above but with distance of 4/11
    chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts, mid_pt), byrow=T, ncol=2), X=mid_pt, dist=4/11, plot=F))
  ### Sets structure of multiplot, populates it one plot at a time (tolerates any number of plots)
  dev.new(width = 10, height = 12)
  par(mfcol = c(ceiling(length(matricies) / 3), 3))
  for (i in 1:length(matricies)) {
    # calculates and inputs the row and col number for the plot in the multiplot
    row <- ceiling(i / 3)
    if(i %% 3 == 0){ col <- 3
    }else{ col <- i %% 3 }
    par(mfg = c(row, col))
    ### First 3, with different starting points, are plotted using gradient of colours varying for each 1/20th of the datapoints in order
    if (i<4){
      subset_size <- nrow(matricies[[i]]) / 20
      # plots first subset black
      plot(matricies[[i]][1:subset_size,], cex = .001, pch = 20, axes = FALSE, ann = FALSE)
      for (subset in 2:20){ # layers on other subsets, coloured with palette
        subset_range <- ((subset-1)*subset_size+1):(subset*subset_size)
        points(matricies[[i]][subset_range,], cex=.001, pch=20, col=pal[subset])
        ### Else plots whole matrix, with small black dots and no axes or labels
      }}else{ plot(matricies[[i]], cex=.001, pch=20, axes=FALSE, ann=FALSE, col=pal[(i-2)*2]) }}
  return("Changing the startpoint (X) seems to make no difference; it leads to a few points outside the shape but they quickly find their way to it. When a seed is set, the distribution of dots is, after the first few, identical regardless of X. (See shapes 1-3, enlarge to make clearer)
Decreasing distance from 0.5, triangular fractals' small constituent units shrink and move towards the closest vertex; increasing, they start to overlap and converge on eachother. (See shapes 1-4 and 7)
Adding extra points, and different combinations of those points, lead to various other fractals, most of which require a distance of rational, repeating decimals close to but less than 0.5. (See shapes 5-12, and the annotated code for more details)")
}
draw_shape <- function(fun, adj_len, adj_ang=pi/4, grad=TRUE, e=0.001, length=.1, lwd=1){
  ### Visualises whichever of the "tree", "fern" or "fern2" functions is supplied, drawn from the bottom-centre of a fresh frame pointing straight up
  #
  # Arguments:
  #   fun - function; the "tree", "fern" or "fern2" function to be used
  #   adj_len - numeric vector; modulates length of subsequent line segments from each new start position
  #   adj_ang - radian; modulates angle of subsequent line segments from each new start position
  #   grad - logical; if TRUE, a colour gradient will be implemented, colouring the line between brown and light green based on input length
  #   e - +ve numeric; smaller than input length; determines the endpoint of the recursive calls, relying on the function to incrementally decrease the size of "length"
  #   length - +ve numeric; sets length of line from start position
  #   lwd - +ve numeric; sets width of line
  #
  # Returns: -
  #
  ########
  ### Start an empty plotting frame for the shape
  frame()
  ### Assemble the arguments shared by all three drawers, then invoke the chosen one: start at (0.5, 0), pointing straight up (pi/2)
  shape_args <- list(c(.5, 0), pi/2, length,
                     e=e, grad=grad, lwd=lwd, adj_len=adj_len, adj_ang=adj_ang)
  do.call(fun, shape_args)
}
# Challenge question F
Challenge_F <- function() {
  ### Generates and plots a series of fractal shapes resembling organisms based off different parameters of the draw_shape function, and returns a statement.
  #
  # Arguments: -
  #
  # Returns: character string; statement explaining the effects of decreasing the input e
  #
  ########
  ### Clear graphics
  graphics.off()
  ### Open new, wide 2x4 plot, and plots all 8 shapes (each block below selects a panel with par(mfg), draws a shape, then titles it)
  dev.new(width = 9, height = 6)
  par(mfcol=c(2,4))
  # Conifer (titled "Connifer")
  par(mfg=c(1,1))
  draw_shape(tree, c(0.61,0.61), pi/4, length=.14, grad=TRUE, lwd=5)
  title(main="Connifer", line=-6, cex.main=2)
  # Bush
  par(mfg=c(1,2))
  draw_shape(tree, c(0.64,0.64), pi/2.5, length=.14, grad=TRUE, lwd=3)
  title(main="Bush", line=-6, cex.main=2)
  # Shrimp tail
  par(mfg=c(1,3))
  draw_shape(tree, c(0.78,0.48), pi/6, length=.13, grad=FALSE, lwd=.4)
  title(main="Shrimp Tail", line=-6, cex.main=2)
  # Trilobite
  par(mfg=c(1,4))
  draw_shape(tree, c(0.94,0.31), pi/8, length=.1, grad=FALSE, lwd=.2)
  title(main="Trilobite", line=-6, cex.main=2, adj=.1)
  # Vines
  par(mfg=c(2,1))
  draw_shape(tree, c(0.29,0.83), pi/2.5, length=.23, grad=TRUE, lwd=2)
  title(main="Vines", line=-6, cex.main=2, adj=.7)
  # Wheat
  par(mfg=c(2,2))
  draw_shape(fern, c(0.35,0.85), pi/8, length=.07, grad=FALSE, lwd=.4)
  title(main="Wheat", line=-6, cex.main=2)
  # Italian Cyprus Tree
  par(mfg=c(2,3))
  draw_shape(fern2, c(0.38, 0.88), pi / 8, length=.06, grad=TRUE, lwd=3)
  title(main="Italian Cyprus Tree", line=-6, cex.main=2)
  # Christmas Tree
  par(mfg=c(2,4))
  draw_shape(fern2, c(0.43,0.85), pi/2, length=.08, grad=TRUE, lwd=3)
  title(main="Merry Christmas", line=-6, cex.main=2)
  return("Time taken and level of detail added seems to increase exponentially as e is lowered. This is because as e decreases, additional levels of detail (sub-pinnules of the fern if you will) are added, and the surface area upon which they're added is exponentially larger than at any of the previous levels.")
}
# Script: bjn20_HPC_2020_main.R
#
# Desc: CMEE 2020 HPC excercises R code main proforma
#
# Arguments:
# -
#
# Output:
# -
#
# Date: 21 Dec 2020
#
### Student identification details submitted with the coursework
name <- "Ben Nouhan"
preferred_name <- "Ben"
email <- "bjn20@ic.ac.uk"
username <- "bjn20"
# Personally-assigned speciation rate used by the cluster simulations
personal_speciation_rate <- 0.0052206
# Question 1
species_richness <- function(community){
  ### Counts the number of distinct species present in a community
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #
  # Returns: integer 0 to Inf; the count of distinct species in the community
  #
  ########
  ### duplicated() flags repeat sightings of a species; the non-duplicates are exactly the distinct species
  distinct_count <- sum(!duplicated(community))
  distinct_count
}
# Question 2
init_community_max <- function(size){
  ### Generates initial state for a simulation community, with the max possible number of species for its size (every individual a different species)
  #
  # Arguments:
  #   size - integer 0 to Inf; number of individuals in a community
  #
  # Returns: vector of +ve integers; sequence 1, 2, ..., size (empty when size is 0)
  #
  ########
  ### seq_len is used instead of 1:size so that size = 0 yields an empty community rather than the erroneous c(1, 0) that 1:0 produces
  return(seq_len(size))
}
# Question 3
init_community_min <- function(size){
  ### Generates initial state for a simulation community, with the min possible number of species for its size (every individual the same species)
  #
  # Arguments:
  #   size - integer 0 to Inf; number of individuals in a community
  #
  # Returns: vector of integers; the species identity 1 repeated once per individual
  #
  ########
  ### A single species label (1), replicated once for each member of the community
  community <- rep.int(1, size)
  community
}
# Question 4
choose_two <- function(max_value){
  ### Draws a pair of distinct integers uniformly at random from 1..max_value
  #
  # Arguments:
  #   max_value - integer 2 to Inf; upper bound of the range sampled from
  #
  # Returns: vector of 2 integers; two distinct values from 1 to max_value, in sampled order
  #
  ########
  ### sample without replacement guarantees the two values differ
  pair <- sample(x = max_value, size = 2, replace = FALSE)
  pair
}
# Question 5
neutral_step <- function(community){
  ### Performs a single death/birth step of the neutral model: one randomly chosen individual dies and is replaced by the offspring of another (no speciation)
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #
  # Returns: vector of +ve integers; the community with one individual replaced by a copy of another
  #
  ########
  ### Pick the indices of the victim and the parent at random (distinct)
  idx <- choose_two(length(community))
  ### The individual at the first index is overwritten by the species of the second
  community[idx[1]] <- community[idx[2]]
  community
}
# Question 6
neutral_generation <- function(community){
  ### Simulates one generation's worth of neutral steps (community size / 2 steps, as each step is a death plus a birth), with no speciation
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #
  # Returns: vector of +ve integers; the community after "num_steps" neutral_step replacements
  #
  ########
  ### Finds number of neutral steps needed for generation (half the community size)
  num_steps <- length(community)/2
  # if the community size is odd, num_steps is n/2 + 0.5: randomly pick ceiling or floor to round it, so odd sizes round up half the time (this draw consumes RNG state only in the odd case)
  num_steps <- ifelse(num_steps %% 1 == .5, sample(c(ceiling, floor), 1)[[1]](num_steps), num_steps)
  ### Loop repeating neutral step "num_steps" times
  for (step in 1:num_steps) {
    community <- neutral_step(community) }
  return(community)
}
# Question 7
neutral_time_series <- function(community,duration) {
  ### Simulates generations of the community based on neutral theory (no speciation), recording the species richness at every generation
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #   duration - +ve integer; number of generations the simulation is to be run for
  #
  # Returns: vector of +ve integers; time series of species richness from gen(0) to gen(duration); length is duration+1
  #
  ########
  ### Preallocates the full record rather than growing it with c() each generation (avoids O(n^2) copying); slot 1 holds generation 0, slot g+1 holds generation g
  rich_vect <- integer(duration + 1)
  rich_vect[1] <- species_richness(community)
  ### Advances the community one generation at a time, recording richness; seq_len safely yields an empty loop if duration is 0
  for (gen in seq_len(duration)){
    community <- neutral_generation(community)
    rich_vect[gen + 1] <- species_richness(community)
  }
  return(rich_vect)
}
# Question 8
question_8 <- function() {
  ### Plots a time-series graph of species richness over 200 generations simulated with the neutral theory model, starting with a community of 100 individuals all of different species (no speciation)
  #
  # Arguments: -
  #
  # Returns: character string; statement answering "What state will the system always converge to if you wait long enough, and why?"
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Generate the data: generation numbers and the matching richness time series
  generations_vect <- 0:200
  species_richness_vect <- neutral_time_series(1:100, 200)
  ### Plots species_richness_vect against generations_vect
  plot(generations_vect, species_richness_vect,
       # labels axes and graph as a whole
       xlab="Generations Elapsed", ylab="Species Richness",
       main="Species Richness of a Community \nover 200 Generations",
       # adds line, removes datapoints, sets its colour and width
       col="mediumblue", type="l", lwd=3,
       # disable numerical axes labels, sets edge of axes to origin
       xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0, 100), xlim=c(-.5, 200))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, 200, by=20), las=1, labels=TRUE)
  axis(2, seq(0, 100, by=10), las=2, labels=TRUE)
  axis(1, seq(0, 200, by=4), lwd.ticks=.3, labels=FALSE)
  # minor y ticks now span the full 0-100 axis (previously stopped at 20, a copy-paste slip; cf. the equivalent seq(0, nPop, by=2) in Challenge_A)
  axis(2, seq(0, 100, by=2), lwd.ticks=.3, labels=FALSE)
  ### Statement
  return("The system will always converge on a state of monodominance. Since no speciation can occur in this model, each step can only reduce or maintain species richness, hence species richness will decrease over time until it reaches the mininmum of 1.")
}
# Question 9
neutral_step_speciation <- function(community,speciation_rate) {
  ### Performs a single death/birth step of the neutral model on a community vector, with speciation as a possibility: the replacement is a brand-new species with probability speciation_rate
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #   speciation_rate - numeric 0 to 1; probability that the replacement individual belongs to a new species rather than copying an existing one
  #
  # Returns: vector of +ve integers; the community with one individual replaced, either by a copy of another or by a new species
  #
  ########
  ### Randomly choose index of 2 individuals of community (victim first, parent second)
  pair <- choose_two(length(community))
  ### With probability "speciation_rate" the replacement is a new species label; otherwise it copies the parent's species
  if (runif(1) < speciation_rate){ pair[[2]] <- max(community)+1 # NOTE(review): max(community)+1 can reuse the label of an extinct species; harmless for species richness, but not a globally unique species ID -- a persistent counter would be needed for that
  }else{ pair[[2]] <- community[pair[2]] }
  ### Replace first with value of second within community
  community[pair[1]] <- pair[2]
  return(community)
}
# Question 10
neutral_generation_speciation <- function(community,speciation_rate) {
  ### Simulates one generation's worth of neutral steps (community size / 2 steps), with speciation as a possibility at each step
  #
  # Arguments:
  #   community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #   speciation_rate - numeric 0 to 1; probability that any replacement individual belongs to a new species
  #
  # Returns: vector of +ve integers; the community after "num_steps" neutral_step_speciation replacements
  #
  ########
  ### Finds number of neutral steps needed for generation (half the community size)
  num_steps <- length(community)/2
  # if the community size is odd, num_steps is n/2 + 0.5: randomly pick ceiling or floor to round it, so odd sizes round up half the time (this draw consumes RNG state only in the odd case)
  num_steps <- ifelse(num_steps %% 1 == .5, sample(c(ceiling, floor), 1)[[1]](num_steps), num_steps)
  ### Loop repeating neutral step (with speciation) "num_steps" times
  for (step in 1:num_steps) {
    community <- neutral_step_speciation(community,speciation_rate) }
  return(community)
}
# Question 11
neutral_time_series_speciation <- function(community,speciation_rate,duration) {
  ### Simulates generations of the community based on neutral theory, and finds the species richness of each generation, with speciation as a possibility
  #
  # Arguments:
  # community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  # speciation_rate - numeric 0 to 1; parameter to set rate at which speciation occurs when an object is replaced
  # duration - +ve integer; number of generations the simulation is to be run for
  #
  # Returns: vector of +ve integers; time series of species richness at each stage, from gen(0) to gen(duration); length is duration+1
  #
  ########
  ### Preallocates the result (duration+1 slots) instead of growing it with
  ### c() every generation, which copies the whole vector each time
  rich_vect <- numeric(duration + 1)
  rich_vect[1] <- species_richness(community)
  ### Simulates next generation "duration" times, recording species richness
  for (gen in seq_len(duration)) {
    community <- neutral_generation_speciation(community, speciation_rate)
    rich_vect[gen + 1] <- species_richness(community)
  }
  return(rich_vect)
}
# Question 12
question_12 <- function() {
  ### Plots a time-series graph of species richness over 200 generations simulated with the neutral theory model, starting with a community of 100 individuals all of different species (blue) and all the same species (red), with speciation as a possibility
  #
  # Arguments: -
  #
  # Returns: character string; statement explaining the effect initial conditions had, and answering "Why does the neutral model give these particular results?"
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Generate the data; one richness vector for each of maximum and minimum
  ### initial richness (100 distinct species vs a single species)
  generations_vect <- 0:200
  max_species_richness <- neutral_time_series_speciation(1:100, 0.1, 200)
  min_species_richness <- neutral_time_series_speciation(rep(1,100), 0.1, 200)
  ### Plots max_species_richness against generations_vect
  plot(generations_vect, max_species_richness,
  # labels axes and graph as a whole
  xlab="Generations Elapsed", ylab="Species Richness",
  main="Species Richness of Maximally and Minimally Diverse Communities \n with a Speciation Rate of 0.1 over 200 Generations",
  # adds line, removes datapoints, sets its colour and width
  col="mediumblue", type="l", lwd=3,
  # disable numerical axes labels, sets edge of axes to origin
  xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0, 100), xlim=c(-.5, 200))
  # plots min_species_richness against generations_vect, formats line
  lines(generations_vect, min_species_richness, col="maroon", lwd=3)
  # adds legend for the lines in topright corner
  legend("topright", col=c("mediumblue","maroon"), lwd=3, cex=0.8,
  legend=c("Maximum Initial Richness","Minimum Initial Richness"))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, 200, by=20), las=1, labels=TRUE)
  axis(2, seq(0, 100, by=10), las=2, labels=TRUE)
  axis(1, seq(0, 200, by=4), lwd.ticks=.3, labels=FALSE)
  axis(2, seq(0, 100, by=2), lwd.ticks=.3, labels=FALSE)
  ### Statement (typo "number ov" corrected to "number of")
  return("Speciation introduces a force to increase species richness, where before there was only a force to decrease it, extinction. The community of maximum species richness has nowhere to go but a species richness decrease, and vice versa. They will hence meet in the middle, where these two forces reach an equilibrium. With such a small sample, random variation introduces significant fluctuations even at equilibrium, but as population size or number of averaged simulations increases, these lines will converge as generations pass.")
}
# Question 13
species_abundance <- function(community) {
  ### Tallies how many individuals each species has and returns those counts,
  ### largest first
  #
  # Arguments:
  # community - vector of +ve integers; collection of individuals in the community, each object is an integer giving the species of the individual in that position
  #
  # Returns: vector of +ve integers; per-species abundances in descending order
  #
  ########
  ### Count individuals per species, strip the table's names/dims,
  ### then order the counts from most to least abundant
  freq_tab <- table(community)
  counts <- as.integer(freq_tab)
  sort(counts, decreasing = TRUE)
}
# Question 14
octaves <- function(abundance_vector) {
  ### Bins species abundances into octave classes (1, 2-3, 4-7, 8-15, ...)
  #
  # Arguments:
  # abundance_vector - vector of +ve integers; species abundances of a community
  #
  # Returns: vector of integers 0 to Inf; count of species in each octave class, in ascending order of class size
  #
  ########
  ### Class k holds abundances in [2^(k-1), 2^k - 1): floor(log2(a)) + 1 maps
  ### each abundance to its class, and tabulate() counts class membership
  oct_classes <- floor(log2(abundance_vector)) + 1
  tabulate(oct_classes)
}
# Question 15
sum_vect <- function(x, y) {
  ### Adds two vectors elementwise, zero-padding the shorter so lengths match
  #
  # Arguments:
  # x, y - vectors of integers 0 to Inf; vectors to be summed
  #
  # Returns: vector of integers 0 to Inf; elementwise sum, length max(length(x), length(y))
  #
  ########
  ### Both inputs are padded up to the longer length (a zero-length pad is a
  ### no-op in value terms), then summed directly
  target_len <- max(length(x), length(y))
  pad <- function(v) c(v, rep(0, target_len - length(v)))
  pad(x) + pad(y)
}
# Question 16
question_16 <- function() {
  ### Plots a bargraph of mean octave class frequencies, representing the mean species abundance distribution, from 100 evenly-spaced samples of 2000 post-burn-in generations of neutral model simulations for a community of 100 individuals
  #
  # Arguments: -
  #
  # Returns: character string; statement answering "Does the initial condition of the system matter, and why/why not?"
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Generate the community of 100 individuals with maximum species richness
  community_max <- init_community_max(100)
  ### Run a 200-generation burn-in so the community reaches equilibrium,
  ### then take the first octave sample and start the sample counter at 1
  for (gen in 1:200){
  community_max <- neutral_generation_speciation(community_max, 0.1)}
  oct_total <- octaves(species_abundance(community_max)); oct_count <- 1
  ### Run 2000 more generations, sampling octaves every 20 generations;
  ### sum_vect() accumulates octave vectors of possibly different lengths
  for (gen in 1:2000){
  community_max <- neutral_generation_speciation(community_max, 0.1)
  if (gen %% 20 == 0){
  oct_total <- sum_vect(oct_total,octaves(species_abundance(community_max)))
  oct_count <- oct_count + 1 }}
  ### Mean octave frequencies, rounded-up y-axis limit, and per-class
  ### abundance-range labels (class 1 stays "1"; class k is 2^(k-1) to 2^k-1)
  mean_oct <- oct_total/oct_count
  ymax <- ceiling(max(mean_oct))
  x_vals <- 1:length(mean_oct)
  # assigning character labels into x_vals coerces the whole vector to character
  x_vals[-1] <- paste0(2^(x_vals[-1]-1), "-", 2^x_vals[-1]-1);
  ### Creates barplot of the mean species abundance distribution, as octaves,
  ### for the equilibrium period
  barplot(mean_oct, names.arg=x_vals,
  # labels axes and graph as a whole
  xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
  main="Estimated Mean Species Abundance Distribution as Octaves \n over 2000 Generations",
  # disable numerical y axis labels, set edge of axes to origin, set limit
  yaxt ="n", yaxs="i", ylim=c(0, ymax))
  # adds custom numerical y axis labels and minor ticks
  axis(2, seq(0, ymax, by=1), las=1, labels=TRUE)
  axis(2, seq(0, ymax, by=0.2), lwd.ticks=.3, labels=FALSE)
  return("The initial condition of the system does not matter using these parameters; as explained in question 12, 200 generations (the burn-off period used here) is more than enough time for equilibrium to be reached from the two extremes of species richness. Hence, at the start of the subsequent 2000 generation simulation, the expected start-point regardless of initial conditions is expected to be the same, differing only due to random fluctuations owing to the small sample size.")
}
# Question 17
cluster_run <- function(speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations, output_file_name) {
  ### Runs neutral_generation_speciation repeatedly for a fixed wall-clock
  ### budget, periodically recording species richness and abundance octaves,
  ### and saves all results to the argued rda file
  #
  # Arguments:
  # speciation_rate - numeric 0-1; determines frequency of speciation
  # size - +ve integer; population size of the simulated community
  # wall_time - +ve integer; how long the simulation runs, in minutes
  # interval_rich - +ve integer; generation interval at which species richness is recorded during the burn-in period
  # interval_oct - +ve integer; generation interval at which octaves of species abundance are recorded throughout the simulation
  # burn_in_generations - +ve integer; number of generations before the community is expected to be comfortably in equilibrium
  # output_file_name - char string; name of output rda file
  #
  # Returns: - (side effect: writes output_file_name)
  #
  ########
  ### Generates community of specified size and of minimum diversity
  community <- init_community_min(size)
  ### Starts clock; creates generation count and empty accumulators
  ### (final lengths depend on elapsed wall time, so they cannot be preallocated)
  ptm <- proc.time(); nGens <- 1; richness_vect <- NULL; oct_vect <- list()
  ### Simulates generations until 60*wall_time seconds have elapsed
  while (proc.time()[3] - ptm[3] < 60 * wall_time) {
    community <- neutral_generation_speciation(community, speciation_rate)
    # record richness at the given interval during burn-in; `&&` (scalar,
    # short-circuiting) replaces the vectorised `&` inside a scalar if()
    if (nGens %% interval_rich == 0 && nGens <= burn_in_generations) {
      richness_vect <- c(richness_vect, species_richness(community))
    }
    # record octaves of species abundance at the given interval
    if (nGens %% interval_oct == 0) {
      oct_vect <- c(oct_vect, list(octaves(species_abundance(community))))
    }
    # advance the generation count
    nGens <- nGens + 1
  }
  # total elapsed seconds, saved alongside the results
  elapsed_time <- proc.time()[3] - ptm[3]
  ### Saves results, elapsed time, and all original parameters to the rda file
  save(richness_vect, oct_vect, community, elapsed_time,
  speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations, file=output_file_name)
}
# Question 20
process_cluster_results <- function(iter_num=100, popsizes=c(500, 1000, 2500, 5000)) {
  ### Analyses rda files generated by cluster_run, finding the mean octave of species abundances for each community size and saving them to a new rda file
  #
  # Arguments:
  # iter_num - +ve integer; number of rda files to be analysed, same as number of iterations run on the cluster; assumed to be a multiple of length(popsizes)
  # popsizes - vector of integers; the population sizes analysed, assuming iterations are grouped contiguously by size in file-number order
  #
  # Returns: list of vectors; one vector of mean species-abundance octave classes per population size
  #
  ########
  ### Loads each rda file in turn and collects its objects into a list of lists
  ### NOTE(review): load() overwrites richness_vect, oct_vect, etc. in this
  ### function's environment on every iteration; the obj_list snapshot below
  ### is what preserves each file's values
  combined_results <- list()
  for (i in 1:iter_num){
  # path is relative to the working directory; assumes files are numbered 1..iter_num
  fname <- paste0("../../data/output_files/output_file_", i, ".rda")
  load(file=fname)
  # snapshot the ten objects saved by cluster_run, in its save() order
  # (indices used below: [[2]] oct_vect, [[9]] interval_oct, [[10]] burn_in_generations)
  obj_list <- list(richness_vect, oct_vect, community, elapsed_time, speciation_rate, size, wall_time, interval_rich, interval_oct, burn_in_generations)
  combined_results[[length(combined_results)+1]] <- obj_list }
  ### Number of simulations per population size; output accumulator
  sims_per_size <- iter_num / length(popsizes)
  mean_oct_ls <- list()
  ### Processes each population size's contiguous block of simulations
  for (popsize in 0:(length(popsizes)-1)) {
  # index offset of the first simulation for this popsize (0-based)
  size_start <- popsize * sims_per_size
  # burn-in length and octave-sampling interval, taken from the block's first
  # simulation; their ratio is how many leading octave samples to discard
  burn_in <- combined_results[[size_start + 1]][[10]]
  oct_int <- combined_results[[size_start + 1]][[9]]
  burn_in_octs <- burn_in/oct_int
  ### Gathers every post-burn-in octave vector across this popsize's simulations
  multisim_oct_vect <- list()
  for (sim in 1:sims_per_size) {
  # absolute index of this simulation in combined_results
  iter <- size_start + sim
  # extract the simulation's octave samples and drop those taken during burn-in
  oct_vect <- combined_results[[iter]][[2]]
  postBI_oct_vect <- oct_vect[(burn_in_octs+1):length(oct_vect)]
  # append the remaining samples to the per-popsize master list
  multisim_oct_vect <- c(multisim_oct_vect, postBI_oct_vect) }
  ### Zero-pads every octave vector up to the longest so they can be averaged
  max_length <- max(sapply(multisim_oct_vect, length))
  multisim_oct_vect <- lapply(multisim_oct_vect, function(x) c(x, rep(0, max_length - length(x))))
  ### Stacks the vectors row-wise and takes the column means (mean per octave class)
  oct_df <- as.data.frame(do.call(rbind, multisim_oct_vect))
  mean_oct <- sapply(1:max_length, function(x) mean(oct_df[, x]))
  ### Appends this popsize's vector of means to the output list
  mean_oct_ls[[length(mean_oct_ls) + 1]] <- mean_oct }
  ### Saves as .rda file (not actually used downstream) and returns the list
  save(mean_oct_ls, file = "mean_oct_ls.rda")
  return(mean_oct_ls)
}
plot_cluster_results <- function() {
  ### Plots mean octave classes for each popsize as barcharts, from the results
  ### of cluster_run (via process_cluster_results, which also saves them to rda)
  #
  # Arguments: -
  #
  # Returns: list of vectors; one vector of mean species-abundance octave classes per population size
  #
  ########
  ### Clear all graphics; set number of iterations and popsizes to analyse
  graphics.off()
  iter_num <- 100
  popsizes <- c(500, 1000, 2500, 5000)
  ### Extracts mean octaves per population size from the rda files' results
  mean_oct_ls <- process_cluster_results(iter_num, popsizes)
  ### Sets up the multiplot grid (tolerates any number of popsizes)
  dev.new(width = 16.4, height = 10)
  par(mfcol = c(ceiling(length(popsizes)/2), 2))
  ### Palette is loop-invariant, so build it once (was rebuilt per iteration);
  ### the stray debug `print(col)` has been removed
  pal <- colorRampPalette(c("lightgrey", "grey20"))(length(popsizes))
  for (i in 1:length(popsizes)){
    # this popsize's means, and a y-axis limit/interval derived from them
    mean_oct <- mean_oct_ls[[i]]
    ymax <- ceiling(max(mean_oct))
    yint <- ceiling(ymax/5)/2
    # x axis labels: abundance ranges corresponding to octave classes
    x_vals <- 1:length(mean_oct)
    x_vals[-1] <- paste0(2 ^ (x_vals[-1] - 1), "-", 2 ^ x_vals[-1] - 1)
    # target panel for this plot (plain if/else instead of scalar ifelse)
    row <- ceiling(i/2)
    col <- if (i %% 2 != 0) 1 else 2
    par(mfg = c(row, col))
    ### Plots barplot of mean octave class values
    barplot(mean_oct, names.arg=x_vals, cex.names=.8,
    # labels axes and graph as a whole
    xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
    main=paste("Mean Species Abundance Distribution as Octaves \n for Communities of", popsizes[i], "individuals"),
    # disable numerical y axis labels, set edge of axes to origin, set limit
    yaxt="n", yaxs="i", ylim=c(0, ymax), col=pal[i])
    # adds custom numerical y axis labels and minor ticks
    axis(2, seq(0, ymax, by=yint), cex.axis=0.8, las=1, labels=TRUE)
    axis(2, seq(0, ymax, by=yint/5), lwd.ticks=.3, labels=FALSE) }
  ### Returns the data plotted within the function
  return(mean_oct_ls)
}
# Question 21
question_21 <- function() {
  ### Returns the fractal dimension of the Q21 shape together with the working
  #
  # Arguments: -
  #
  # Returns: list; [[1]] numeric fractal dimension log(8)/log(3), [[2]] character string explaining the calculation
  #
  ########
  # typo "constiuent" corrected to "constituent"
  answer <- list(log(8)/log(3), "8 units of the constituent, smaller object are needed to increase the width & height of the larger by 3 times: hence, where the number of dimensions is given by x, 3^x = 8, and x = log(8)/log(3)")
  return(answer)
}
# Question 22
question_22 <- function() {
  ### Returns the fractal dimension of the Q22 shape together with the working
  #
  # Arguments: -
  #
  # Returns: list; [[1]] numeric fractal dimension log(20)/log(3), [[2]] character string explaining the calculation
  #
  ########
  # typo "constiuent" corrected to "constituent"
  answer <- list(log(20)/log(3), "20 units of the constituent, smaller object are needed to increase the width, height and depth of the larger by 3 times: hence, where the number of dimensions is given by x, 3^x = 20, and x = log(20)/log(3)")
  return(answer)
}
# Question 23
chaos_game <- function(A=c(0,0), B=c(3,4), C=c(4,1), X=c(0,0), points=NULL, dist=.5, reps=100000, plot=TRUE){
  ### Plays the "chaos game": starting from X, repeatedly jumps a fraction
  ### `dist` of the way towards a randomly chosen anchor point, tracing out a
  ### fractal (a Sierpinski-style triangle with the defaults). Either plots
  ### the result and returns an explanatory statement (default) or returns
  ### the raw coordinate matrix.
  #
  # Arguments:
  # A, B, C - coordinates vectors; default anchor points for the shape
  # X - coordinates vector; start point of the walk
  # points - matrix of coordinate vectors; optional replacement for the matrix built from A, B and C, allowing more than 3 anchors; ncol(points) must be 2, one anchor per row
  # dist - +ve numeric; fraction of the distance moved towards the sampled anchor at each step
  # reps - +ve integer; number of steps taken (resolution of the fractal)
  # plot - logical; if TRUE (default), plot the shape and return a statement; otherwise return the coordinate matrix
  #
  # Returns:
  # plot=TRUE - character string; statement describing and explaining the generated shape
  # plot=FALSE - matrix of coordinate vectors; all reps coordinates of the walk
  #
  ########
  ### Clear all graphics; fixed seed makes the walk reproducible
  graphics.off(); set.seed(2)
  ### Build the anchor matrix from A/B/C unless one was supplied, and
  ### preallocate the walk (every row starts as X)
  if (is.null(points)) {
    points <- matrix(c(A, B, C), byrow=TRUE, ncol=2)
  }
  coords <- matrix(rep(X, reps), byrow=TRUE, ncol=2)
  ### Pre-draw all random anchor choices at once and look up their coordinates
  chosen_rows <- sample(nrow(points), reps, replace=TRUE)
  targets <- points[chosen_rows,]
  ### Each step moves `dist` of the way from the previous point to its anchor
  for (step in 2:reps) {
    coords[step,] <- (coords[step-1,] + targets[step-1,]) * dist
  }
  ### Plot branch: draw the point cloud without axes and return the statement
  if (plot) {
    plot(coords, cex=.001, pch=20, axes=FALSE, ann=FALSE)
    return("This code generates a fractal shape with dimension of log(3)/log(2), equivalent to the shape in Q21 but with triangles rather than squares. The plane it's on appears not to be flat on the screen because the points are not equally spaced. In fact, a 2D equalateral triangle with integer coordinates cannot exist.")
  }
  ### Otherwise hand back the raw coordinates
  coords
}
# Question 24
turtle <- function(start_position, direction, length, grad=FALSE, lwd=1) {
  ### Draws a line segment from start_position at the given angle and length,
  ### returning the endpoint so callers can chain segments
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of the line segment from start position
  # length - +ve numeric; length of the line from start position
  # grad - logical; if TRUE, the segment is coloured on a brown-to-green ramp indexed by length (shorter = greener)
  # lwd - +ve numeric; line width
  #
  # Returns:
  # end_position - coordinates vector; second coordinate of the segment
  #
  ########
  ### Endpoint: sin(pi/2 - d) is cos(d), so this is start + length*(cos d, sin d)
  end_position <- start_position + length * sin(c(pi/2-direction, direction))
  ### Plain if/else replaces the previous scalar ifelse() whose branches were
  ### side-effect assignments (an anti-pattern); isTRUE() also makes grad=NA
  ### fall through to black instead of erroring
  if (isTRUE(grad)) {
    # NOTE(review): the index ceiling(-log2(length))-2 only lands inside the
    # 9-colour ramp for lengths roughly in (2^-11, 2^-3); other lengths give
    # an NA or negative index — confirm callers keep length in that range
    col <- colorRampPalette(c("brown", "darkgreen", "lightgreen"))(9)[ceiling(-log2(length))-2]
  } else {
    col <- "black"
  }
  ### Draws the segment and returns the endpoint
  lines(rbind(start_position, end_position), col=col, lwd=lwd)
  return(end_position)
}
# Question 25
elbow <- function(start_position, direction, length) {
  ### Draws an "elbow": one segment from start_position, then a second,
  ### slightly shorter segment bent 45 degrees clockwise from its end
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of the first segment from start position
  # length - +ve numeric; length of the first segment
  #
  # Returns: -
  #
  ########
  ### First segment; its endpoint anchors the second
  bend <- turtle(start_position, direction, length)
  ### Second segment: rotated by -pi/4 and scaled by 0.95; invisible()
  ### suppresses the returned endpoint so nothing is printed
  invisible(turtle(bend, direction - pi/4, length * 0.95))
}
# Question 26
spiral <- function(start_position, direction, length) {
  ### Draws a spiral by recursion: each segment is followed by another rotated
  ### -pi/4 and shrunk by 5%, stopping once segments drop below 0.001
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of this segment from start position
  # length - +ve numeric; length of this segment
  #
  # Returns: character string; explains why a length cutoff is needed
  #
  ########
  ### Draw this segment; its endpoint seeds the next recursive call
  seg_end <- turtle(start_position, direction, length)
  ### Recurse until the segments become negligibly small
  if (length > 0.001) {
    spiral(seg_end, direction - pi/4, length * 0.95)
  }
  return("The function is calling itself recursively, stuck in an infinite loop (or spiral, if you will), so the computer rapidly reaches the default memory limit set by R and returns an error.")
}
# Question 27
draw_spiral <- function() {
  ### Visualises the spiral function with preset starting parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Reset graphics state and open a fresh, empty plotting frame
  graphics.off()
  frame()
  ### Draw the spiral from (0.3, 0.7), heading right, first segment 0.3 long
  spiral(c(.3, .7), 0, .3)
}
# Question 28
tree <- function(start_position=c(.5,0), direction=pi/2, length=.2, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.65,0.65), adj_ang=pi/4) {
  ### Recursively draws a binary tree: each segment spawns two children, one
  ### rotated +adj_ang and one -adj_ang, shrinking until length drops below e
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of this segment from start position
  # length - +ve numeric; length of this segment
  # e - +ve numeric, smaller than length; recursion stops once length shrinks below this
  # grad - logical; if TRUE, segments are coloured on a brown-to-green ramp by length
  # lwd - +ve numeric; line width
  # adj_len - numeric vector; length multipliers for the (left, right) children
  # adj_ang - radian; rotation applied to each child relative to its parent
  #
  # Returns: -
  #
  ########
  ### Draw this branch; its endpoint is where the two children attach
  node <- turtle(start_position, direction, length, grad, lwd)
  ### Branch again until the segments become shorter than e
  if (length > e) {
    # left child (+adj_ang) then right child (-adj_ang)
    tree(node, direction + adj_ang, length * adj_len[1], e, grad, lwd, adj_len, adj_ang)
    tree(node, direction - adj_ang, length * adj_len[2], e, grad, lwd, adj_len, adj_ang)
  }
}
draw_tree <- function() {
  ### Calls and visualises the tree function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clears all graphics; opens a new, blank plot
  graphics.off(); frame()
  ### Calls and thus visualises the tree function with default parameters
  tree()
}
# Question 29
fern <- function(start_position=c(.5,0), direction=pi/2, length=.1, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.38,0.87), adj_ang=pi/4) {
  ### Recursively draws a fern: each segment spawns a short side branch
  ### rotated by adj_ang plus a longer continuation of the main stem,
  ### shrinking until length drops below e
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of this segment from start position
  # length - +ve numeric; length of this segment
  # e - +ve numeric, smaller than length; recursion stops once length shrinks below this
  # grad - logical; if TRUE, segments are coloured on a brown-to-green ramp by length
  # lwd - +ve numeric; line width
  # adj_len - numeric vector; length multipliers for the (side branch, main stem) children
  # adj_ang - radian; rotation applied to the side branch
  #
  # Returns: -
  #
  ########
  ### Draw this segment; its endpoint is where the children attach
  tip <- turtle(start_position, direction, length, grad, lwd)
  ### Keep growing until segments become shorter than e
  if (length > e) {
    # side branch, rotated +adj_ang and heavily shrunk (adj_len[1])
    fern(tip, direction + adj_ang, length * adj_len[1], e, grad, lwd, adj_len, adj_ang)
    # main stem continues straight, shrinking slowly (adj_len[2])
    fern(tip, direction, length * adj_len[2], e, grad, lwd, adj_len, adj_ang)
  }
}
draw_fern <- function() {
  ### Visualises the fern function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Reset graphics state and open a fresh, empty plotting frame
  graphics.off()
  frame()
  ### Draw the fern with its defaults (base at (0.5, 0), growing upwards)
  fern()
}
# Question 30
fern2 <- function(start_position=c(.5,0), direction=pi/2, length=.1, dir=1, e=0.001, grad=FALSE, lwd=1, adj_len=c(0.38,0.87), adj_ang=pi/4) {
  ### Recursively draws a fern whose side branches alternate sides: dir
  ### (+1/-1) picks the branching side and flips sign each time the main
  ### stem continues, shrinking until length drops below e
  #
  # Arguments:
  # start_position - coordinates vector; first coordinate for the line segment
  # direction - radian; angle of this segment from start position
  # length - +ve numeric; length of this segment
  # dir - 1 or -1; side on which the next side branch grows; flipped on each stem continuation so branches alternate left/right
  # e - +ve numeric, smaller than length; recursion stops once length shrinks below this
  # grad - logical; if TRUE, segments are coloured on a brown-to-green ramp by length
  # lwd - +ve numeric; line width
  # adj_len - numeric vector; length multipliers for the (side branch, main stem) children
  # adj_ang - radian; rotation magnitude applied to side branches
  #
  # Returns: -
  #
  ########
  ### Draw this segment; its endpoint is where the children attach
  tip <- turtle(start_position, direction, length, grad, lwd)
  ### Keep growing until segments become shorter than e
  if (length > e) {
    # side branch, rotated dir*adj_ang and heavily shrunk; keeps current dir
    fern2(tip, direction + dir * adj_ang, length * adj_len[1], dir, e, grad, lwd, adj_len, adj_ang)
    # main stem continues straight, shrinking slowly; dir flips sign
    fern2(tip, direction, length * adj_len[2], -dir, e, grad, lwd, adj_len, adj_ang)
  }
}
draw_fern2 <- function() {
  ### Visualises the fern2 function with its default parameters
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Reset graphics state and open a fresh, empty plotting frame
  graphics.off()
  frame()
  ### Draw the alternating-branch fern with its defaults
  fern2()
}
################################################################################
############################### CHALLENGES #####################################
################################################################################
# Challenge question A
Challenge_A <- function() {
  ### Plots a time-series graph of mean species richness over 'nGens' generations, averaged over 'nSims' neutral-model simulations, starting from communities of 'nPop' individuals of maximum (blue) and minimum (red) initial richness, with error bars and an estimated equilibrium generation
  #
  # Arguments: -
  #
  # Returns: -
  #
  ########
  ### Clear all graphics
  graphics.off()
  ### Parameters: number of simulations, generations, individuals, and the
  ### confidence interval used for the error bars
  nSims <- 500; nGens <- 80; nPop <- 100; CI <- 97.2
  ### Simulation matrix: one community per column; the first nSims columns are
  ### maximally diverse (1:nPop), the last nSims are minimally diverse (all 1s)
  sims <- matrix(c( rep(1:nPop, nSims), rep(rep(1,nPop), nSims)), nrow=nPop)
  ### Generation-0 means and SDs for both community types
  # the min community's SD is truly 0; 0.1 is used to avoid downstream warnings
  mean_SRs_max <- nPop; sd_SRs_max <- sd(1:nPop)
  mean_SRs_min <- 1; sd_SRs_min <- 0.1 #should be 0 but avoids warning)
  ### Advance every simulation one generation at a time, appending the mean
  ### and SD of richness for each community type after each generation
  for (gen in 1:nGens){
  # runs neutral_generation_speciation on each column
  sims <- apply(sims, 2, neutral_generation_speciation, speciation_rate=.1)
  # species richness of each column (max-diversity sims first, then min)
  SRs_vect <- sapply(1:ncol(sims), function(x) species_richness(sims[, x]))
  # means and SDs of species richness for the two community types
  mean_SRs_max <- c(mean_SRs_max, mean(SRs_vect[1:nSims]))
  mean_SRs_min <- c(mean_SRs_min, mean(SRs_vect[(nSims+1):(nSims*2)]))
  sd_SRs_max <- c(sd_SRs_max, sd(SRs_vect[1:nSims]))
  sd_SRs_min <- c(sd_SRs_min, sd(SRs_vect[(nSims + 1):(nSims * 2)])) }
  ### Margins of error from the normal quantile at the chosen CI
  # NOTE(review): the denominator uses sqrt(nPop); a standard error of the
  # mean over simulations would use sqrt(nSims) — confirm which is intended
  alpha <- 1-CI/100
  marg_err_max <- abs(qnorm(alpha/2)*(sd_SRs_max/sqrt(nPop)))
  marg_err_min <- abs(qnorm(alpha/2)*(sd_SRs_min/sqrt(nPop)))
  ### First generation whose error margins overlap; taken as the point of
  ### equilibrium and marked with abline below
  # NOTE(review): if the margins never overlap, equil_gen is never assigned
  # and the abline()/text() calls below would error — confirm this cannot
  # occur with these parameters
  for (gen in 1:nGens){
  if((mean_SRs_max[gen] - marg_err_max[gen]) -
  (mean_SRs_min[gen] + marg_err_min[gen]) < 0){ equil_gen <- gen; break; }}
  ### Plots the mean richness of the maximally diverse communities
  plot(0:nGens, mean_SRs_max,
  # labels axes and graph as a whole
  xlab="Generations Elapsed", ylab="Mean Species Richness",
  main=paste("Mean Species Richness of",nSims,"Simulations of \n Maximally and Minimally Diverse Communities with a \n Speciation Rate of 0.1 over",nGens,"Generations"),
  # adds line, removes datapoints, sets its colour and width
  col="mediumblue", type="l", lwd=3,
  # disable numerical axes labels, sets edge of axes to origin
  xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0,nPop), xlim=c(-.5,nGens))
  # plots the minimally diverse communities' mean richness, formats line
  lines(0:nGens, mean_SRs_min, col="maroon", lwd=3)
  # adds error bars for each line from the pre-calculated margins of error
  arrows(0:nGens, mean_SRs_max + marg_err_max,
  0:nGens, mean_SRs_max - marg_err_max, angle=90, code=3, length=0.02)
  arrows(0:nGens, mean_SRs_min + marg_err_min,
  0:nGens, mean_SRs_min - marg_err_min, angle=90, code=3, length=0.02)
  # adds line and explanatory text at estimated point of equilibrium
  abline(v=equil_gen, lwd=2, col="darkgreen", lty="dashed")
  text(equil_gen, nPop/1.3, "Point of Equilibrium", pos=2, offset=0.5, srt=90)
  # adds legend for the lines in topright corner
  legend("topright", col=c("mediumblue","maroon"), lwd=3, cex=0.8,
  legend=c("Maximum Initial Richness","Minimum Initial Richness"))
  # adds custom numerical axes labels, and minor axis ticks
  axis(1, seq(0, nGens, by=10), las=1, labels=TRUE)
  axis(2, seq(0, nPop, by=10), las=2, labels=TRUE)
  axis(1, seq(0, nGens, by=2), lwd.ticks=.3, labels=FALSE)
  axis(2, seq(0, nPop, by=2), lwd.ticks=.3, labels=FALSE)
}
# Challenge question B
Challenge_B <- function() {
### Plots a time-series graph of mean species richness over 'nGens' generations simulated 'nSims' times with the neutral theory model, starting with 'nSCs' communities of 'nPop' individuals with varying degrees of initial species richness
#
# Arguments: -
#
# Returns: -
#
# NOTE: relies on neutral_generation_speciation() and species_richness(),
# which are defined elsewhere in this file.
#
########
### Clear all graphics
graphics.off()
### Set parameters: number of simulations, generations, starting conditions and individuals/community
nSims <- 150; nGens <- 50; nSCs <- 11; nPop <- 100 #nPop must be a multiple of nSCs-1
# Calculates species richness gap between starting conditions (except first)
SR_interval <- nPop/(nSCs-1)
### Preallocates matrices to use for simulations, and to store means
# sims: one column per simulated community (nSims replicates per starting
# condition); means: one row per generation, one column per starting condition
sims <- matrix(0, nrow=nPop, ncol=nSCs*nSims)
means <- matrix(0, nrow=nGens+1, ncol=nSCs)
# row 1 holds the initial richness of each starting condition
means[1,] <- c(1, seq(SR_interval, nPop, length=nSCs-1))
### Populates 1st & last nSims columns w/ communities of min&max species richness
sims[,c(1:nSims)] <- rep(rep(1, nPop), nSims)
sims[,((nSCs-1)*nSims+1):(nSCs*nSims)] <- rep(1:nPop, nSims)
### Populates middle columns with communities of intermediate species richness
for (SC in 1:(nSCs-2)){
for (col in 1:nSims){
# multiple rounds of non-replacement sampling, until length(indivs) >= nPop
indivs <- NULL; while (length(indivs) < nPop){
indivs <- c(indivs, sample(1:(SC * SR_interval), SC * SR_interval)) }
# input into appropriate column
sims[,(SC * nSims + col)] <- indivs[1:nPop] } }
### Fills the 'means' matrix of mean species richness for all starting conditions by running each simulation forward 'nGens' generations
for (gen in 1:nGens){
# runs neutral_generation_speciation on each column, replace=TRUE
sims <- apply(sims, 2, neutral_generation_speciation, speciation_rate=.1)
# make vector of species richness of each column
SRs_vect <- sapply(1:ncol(sims), function(x) species_richness(sims[, x]))
# finds the mean species richness across the nSims replicates of each starting condition
for (SC in 1:nSCs){
means[(1+gen), SC] <- mean(SRs_vect[(1+nSims*(SC-1)):(nSims*SC)]) } }
### Plots those vectors populated against the generations they were taken from
# creates palette of colours for the different lines
palette <- rainbow(nSCs)
# plots the first line
plot(0:nGens, means[,1],
# labels axes and graph as a whole
xlab="Generations Elapsed", ylab="Mean Species Richness",
main=paste("Mean Species Richness of",nSims,"Simulations of \n Varyingly Diverse Communities with a \n Speciation Rate of 0.1 over",nGens,"Generations"),
# adds line, removes datapoints, sets its colour and width
col=palette[1], type="l", lwd=2,
# disable numerical axes labels, sets edge of axes to origin
xaxt="n", yaxt="n", xaxs="i", yaxs="i", ylim=c(0,nPop), xlim=c(-.5,nGens))
# plots other means columns against generation number, formats line
for (SC in 2:nSCs){
lines(0:nGens, means[,SC], col=palette[SC], lwd=2) }
# adds legend for the lines in topright corner
legend("topright", col=palette[1:nSCs], lwd=3, cex=0.8, legend=paste(
"Starting Richness of", c(1, seq(SR_interval, nPop, length=nSCs-1))))
# adds custom numerical axes labels, and minor axis ticks
axis(1, seq(0, nGens, by=5), las=1, labels=TRUE)
axis(2, seq(0, nPop, by=10), las=2, labels=TRUE)
axis(1, seq(0, nGens, by=1), lwd.ticks=.3, labels=FALSE)
axis(2, seq(0, nPop, by=2), lwd.ticks=.3, labels=FALSE)
}
# Challenge question C
Challenge_C <- function() {
### Analyses rda files generated from run_cluster.sh, finding the generation at which equilibrium was reached for each size of community
#
# Arguments: -
#
# Returns: character string; explanatory statement
#
# NOTE: load() below is expected to bring 'richness_vect', 'interval_rich'
# and 'burn_in_generations' into scope from each .rda file -- confirm
# against the objects saved by run_cluster.sh.
#
########
### Clear all graphics; sets num of iterations and popsizes to plot & analyse
graphics.off()
iter_num <- 100
popsizes <- c(500, 1000, 2500, 5000)
### Creates empty list and populates it with lists of rda files' objects
combined_results <- list()
for (i in 1:iter_num){
# loads each data file based on iter number
fname <- paste0("../../data/output_files/output_file_", i, ".rda")
load(file=fname)
# creates list of objects from data file, adds to list of lists
obj_list <- list(richness_vect, interval_rich, burn_in_generations)
combined_results[[length(combined_results)+1]] <- obj_list }
### Finds number of simulations per size, and creates blank list for output
sims_per_size <- iter_num / length(popsizes)
mean_SR_ls <- list()
### Finds information for each set of popsizes
# NB: 'popsize' here is a 0-based index into 'popsizes', not a population size
for (popsize in 0:(length(popsizes)-1)) {
# corresponding first and last (by index) simulations for that popsize
start <- sims_per_size * popsize + 1
end <- sims_per_size * (popsize + 1)
# finds burn-in period and richness interval from one simulation of this
# popsize -- presumably identical across its replicates; confirm
burn_in <- combined_results[[start + 1]][[3]]
richness_int <- combined_results[[start + 1]][[2]]
# hence generates the vector of corresponding generation numbers
gen_vect <- seq(richness_int, burn_in, by=richness_int)
# creates dataframe of each size's richness_vect, and finds mean of each col
popsize_SRs <- lapply(combined_results[start:end], `[[`, 1)
SR_df <- as.data.frame(do.call(rbind, popsize_SRs))
mean_SR <- sapply(1:ncol(SR_df), function(x) mean(SR_df[, x]))
### Appends vectors of species richness means and generations of each popsize to list of all of them for plotting (log-log scale; the +1 guards against log(0))
mean_SR_ls[[length(mean_SR_ls) + 1]] <- list(log(mean_SR), log(gen_vect+1))}
### Plots mean vectors against generation vectors of each popsize
# creates palette of colours for the different lines
palette <- rainbow(length(popsizes))
# finds upper limit of largest popsize's data - will have highest of each
# NOTE(review): 'round_to' is computed but never used -- candidate for removal
round_to <- signif(popsizes[length(popsizes)]/100, 1)
ymax <- ceiling(max(mean_SR_ls[[length(popsizes)]][[1]]))
xmax <- ceiling(max(mean_SR_ls[[length(popsizes)]][[2]]))
# finds lower limit of smallest popsize's data - will have lowest of each
ymin <- floor(min(mean_SR_ls[[1]][[1]]))
xmin <- ceiling(min(mean_SR_ls[[1]][[2]]))
# plots the first line
plot(mean_SR_ls[[1]][[2]], mean_SR_ls[[1]][[1]],
# labels axes and graph as a whole
xlab="Log of Generations Elapsed", ylab="Log of Mean Species Richness",
main=paste("Mean Species Richness of",sims_per_size,"Simulations of \n Varyingly-Sized Communities"),
# adds line, removes datapoints, sets its colour and width
col=palette[1], type="l", lwd=2,
# disable numerical axes labels, sets edge of axes to origin, & sets upper limits assuming the largest popsize has the largest values
yaxs="i", xaxs="i", ylim=c(ymin,ymax), xlim=c(xmin,xmax), yaxt="n", xaxt="n")
# plots other means columns against generation number, formats line
for (i in 2:length(popsizes)) {
lines(mean_SR_ls[[i]][[2]], mean_SR_ls[[i]][[1]], col=palette[i], lwd=2) }
# adds legend for the lines in bottomright corner
legend("bottomright", col=palette[1:length(popsizes)], lwd=3, cex=0.8,
legend=paste("Population Size of", popsizes))
# adds custom numerical axes labels, and minor axis ticks
axis(1, seq(xmin, xmax, by=1), las=1, labels=TRUE)
axis(2, seq(ymin, ymax, by=1), las=2, labels=TRUE)
axis(1, seq(xmin, xmax, by=0.2), lwd.ticks=.3, labels=FALSE)
axis(2, seq(ymin, ymax, by=0.1), lwd.ticks=.3, labels=FALSE)
### Estimates point at which equilibrium has been reached for each popsize:
### first datapoint lower than the minimum of the trailing 100 datapoints
# NOTE(review): 'richness_int' below still holds the value from the LAST
# iteration of the loop above, so the printed estimates are only correct if
# every popsize shares the same sampling interval -- confirm.
for (pop in 1:length(popsizes)){
for (SR in 101:length(mean_SR_ls[[pop]][[1]])){
if (mean_SR_ls[[pop]][[1]][SR] < min(mean_SR_ls[[pop]][[1]][(SR-100):(SR-1)])){
print(paste("estimated generation of equilibrium for community of size", popsizes[pop], "is", (SR-50)*richness_int))
break;} } }
### Return an explanatory statement
return(paste("From the curves we can see all communities have equilibrated by aproximately e^7.25, or", signif(exp(7.25), 3), "generations from the start. We can estimate more precisely for each community by finding where the data levels out, eg finding the first datapoint lower than a trailing average of the 50 previous datapoints as above. From these results, we gather that 2*popsize is a sufficient burn-in period here; 8*popsize is unnecessarily large, albeit not a significant issue."))
}
# Challenge question D
Challenge_D <- function() {
### Plots a bargraph of octave class frequencies, representing species abundance distribution from the coalescence model for a community of 100 individuals.
#
# Arguments: -
#
# Returns: character string; statement answering "How many CPU hours were used on the coalescence simulation and how many on the cluster do an equivalent set of simulations (take?)? Why were the coalescence simulations so much faster?"
#
# NOTE: relies on sum_vect() and octaves(), which are defined elsewhere in
# this file.
#
########
### Clear all graphics
graphics.off()
### Assigns community size, speciation rate, and sim number variables
J <- 100; v <- 0.1; nSims <- 1000
### Calculates theta and creates a vector for aggregating octaves of each sim
theta <- v * (J - 1) / (1 - v)
oct_total <- c(0,0)
for (i in 1:nSims){
N <- J
### Creates lineages (size of community) and abundances (to add abundances to) vectors
lineages <- rep(1, J)
abundances <- vector()
### While multiple lineages exist, either coalescence or speciation occurs
while (N > 1){
# picks two distinct lineages at random
lin_i <- sample(length(lineages), 2)
if (runif(1) < theta/(theta+N-1)){
# speciation: the first chosen lineage's abundance is finalised
abundances <- c(abundances, lineages[lin_i[1]])
}else{
# coalescence: the first chosen lineage merges into the second
lineages[lin_i[2]] <- lineages[lin_i[1]] + lineages[lin_i[2]] }
### Each loop, the lineages object added to abundances/another lineages object is removed
lineages <- lineages[-lin_i[1]]
N <- N - 1 }
### Final lineages object added to abundance, octaves taken
abundances <- c(abundances, lineages)
oct_total <- sum_vect(oct_total, octaves(abundances))}
### Finds the mean octave frequency, the highest y value rounded up, and the abundance ranges corresponding with each octave class, all for use in plotting
mean_oct <- oct_total/nSims
ymax <- ceiling(max(mean_oct))
# bar labels: "1" for the first octave, then "2-3", "4-7", "8-15", ...
x_vals <- 1:length(mean_oct)
x_vals[-1] <- paste0(2^(x_vals[-1]-1), "-", 2^x_vals[-1]-1)
### Creates barplot of the mean species abundance distribution, as octaves, for the equilibrium period
barplot(mean_oct, names.arg=x_vals,
# labels axes and graph as a whole
xlab="Species Abundance Ranges", ylab="Mean Abundance Range Frequency",
main="Estimated Species Abundance Distribution as Octaves",
# disable numerical axes labels, sets edge of axes to origin, sets limit
yaxt ="n", yaxs="i", ylim=c(0, ymax))
# adds custom numerical y axis labels and minor ticks
axis(2, seq(0, ymax, by=1), las=1, labels=TRUE)
axis(2, seq(0, ymax, by=0.2), lwd.ticks=.3, labels=FALSE)
return("I used a cluster time of 11 hours, whereas this coalescence simulation is near-instantaneous for the same parameters; however, I'd argue they are not equivalent (and in fact, as fundamentally different methods, cannot be equivalent). The simulations run on the cluster and question 16 are averaged over a vast period of time, wheareas this simulation has but one output. As such, there is much higher fluctuation in the results of this coalescence simulation.
Running this coalesence simulation many times and taking an average, as I did, leads to a much more stable result, and makes it about 10x faster than question 16 executing the same number of simulations.
Using this technique, we don't simulate or track a species that will go extinct, since we're effectively starting from the end and tracing back to the speciation events, hence there is no wasted processing time on that. There is also no wasted (assuming we're not interested in it) burn-in period, and we're generating less data to analyse and calling fewer functions.")
}
# Challenge question E
Challenge_E <- function(){
### Generates and plots a series of fractal shapes based off different parameters of the chaos_game function, and returns an explanatory statement.
#
# Arguments: -
#
# Returns: character string; statement describing and explaining the generated shapes
#
# NOTE: relies on chaos_game(), defined elsewhere in this file -- assumed to
# return a matrix of point coordinates when plot=F; confirm against its
# definition.
#
########
### Clear all graphics; creates gradiated palette from black through blue and red to orange
graphics.off()
pal <- colorRampPalette(c("black", "blue", "red", "orange"))(20)
### Create and manipulate vectors of coordinates for use
equi_pts <- c(c(0,0), c(10,0), c(5,5*sqrt(3))) #equilateral triangle
mid_pt <- c(5, 5 / 3 * sqrt(3)) #equilateral triangle center
half_mid_pts <- (equi_pts+mid_pt)/2 #halfway between vertices and midpoint
mid_equi_pts <- (equi_pts+c(equi_pts[5:6], equi_pts[1:4]))/2 #halfway points between vertices
### Creates list of vectors using chaos_game() function, for later plotting
# NOTE(review): T/F abbreviations are used below; prefer TRUE/FALSE
matricies <- list(
### 3 versions of Sierpinski Gasket with incremental colouring of points & different starting points. Lower reps number needed to stop overwriting
chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(0,0), reps=5000), #startpoint = bottom left vertex
chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(mid_pt), reps=5000), #startpoint = middle of equilateral triangle
chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F, X=c(10,5*sqrt(3)), rep=5000), #startpoint = x coord of right vertex and y coord of top. NOTE(review): 'rep=' relies on partial argument matching of 'reps' -- spell it out
### Sierpinski Gasket - dist 1/2 only as explained in text
chaos_game(points=matrix(c(equi_pts), byrow=T, ncol=2), dist=1/2, plot=F),
### Like Sierpinski Gasket, but with points half way between each pair of vertices; identical but only when on 1/3 rather than 1/2
chaos_game(points=matrix(c(mid_equi_pts, equi_pts), byrow=T, ncol=2),
dist=1/3, plot=F),
# As above but with distance of 3/7
chaos_game(points=matrix(c(mid_equi_pts, equi_pts), byrow=T, ncol=2),
dist=3/7, plot=F),
### Like Sierpinski Gasket but with extra point at center of triangle
chaos_game(points=matrix(c(equi_pts, mid_pt), byrow=T, ncol=2),
dist=1/2, plot=F),
# As above but with distance of 3/7
chaos_game(points=matrix(c(equi_pts, mid_pt), byrow=T, ncol=2),
dist=3/7, plot=F),
### Equilateral with points halfway between midpoint and each vertex
chaos_game(points=matrix(c(half_mid_pts, equi_pts), byrow=T, ncol=2),
dist=4/11, plot=F),
### No equilateral triangle points; only points halfway between midpoint and each vertex AND points half way between each pair of vertices
chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts), byrow=T, ncol=2),
X=mid_pt,dist=1/3, plot=F),
# As above but with extra point at center
chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts, mid_pt), byrow=T, ncol=2), X=mid_pt, dist=1/3, plot=F),
# As above but with distance of 4/11
chaos_game(points=matrix(c(half_mid_pts, mid_equi_pts, mid_pt), byrow=T, ncol=2), X=mid_pt, dist=4/11, plot=F))
### Sets structure of multiplot, populates it one plot at a time (tolerates any number of plots)
dev.new(width = 10, height = 12)
par(mfcol = c(ceiling(length(matricies) / 3), 3))
for (i in 1:length(matricies)) {
# calculates and inputs the row and col number for the plot in multiplot
row <- ceiling(i / 3)
if(i %% 3 == 0){ col <- 3
}else{ col <- i %% 3 }
par(mfg = c(row, col))
### First 3, with different starting points, are plotted using gradient of colours varying for each 1/20th of the datapoints in order
if (i<4){
subset_size <- nrow(matricies[[i]]) / 20
# plots first subset black
plot(matricies[[i]][1:subset_size,], cex = .001, pch = 20, axes = FALSE, ann = FALSE)
for (subset in 2:20){ # layers on other subsets, coloured with palette
subset_range <- ((subset-1)*subset_size+1):(subset*subset_size)
points(matricies[[i]][subset_range,], cex=.001, pch=20, col=pal[subset])
### Else plots whole matrix, with small dots coloured from the palette and no axes or labels
}}else{ plot(matricies[[i]], cex=.001, pch=20, axes=FALSE, ann=FALSE, col=pal[(i-2)*2]) }}
return("Changing the startpoint (X) seems to make no difference; it leads to a few points outside the shape but they quickly find their way to it. When a seed is set, the distribution of dots is, after the first few, identical regardless of X. (See shapes 1-3, enlarge to make clearer)
Decreasing distance from 0.5, triangular fractals' small constituent units shrink and move towards the closest vertex; increasing, they start to overlap and converge on eachother. (See shapes 1-4 and 7)
Adding extra points, and different combinations of those points, lead to various other fractals, most of which require a distance of rational, repeating decimals close to but less than 0.5. (See shapes 5-12, and the annotated code for more details)")
}
draw_shape <- function(fun, adj_len, adj_ang=pi/4, grad=TRUE, e=0.001, length=.1, lwd=1){
### Renders one of the recursive drawing functions ("tree", "fern" or "fern2") on a fresh plot, forwarding the supplied styling parameters
#
# Arguments:
# fun - function; which of "tree", "fern" or "fern2" to visualise
# adj_len - numeric vector; scales the length of each subsequent line segment
# adj_ang - radian; adjusts the angle of each subsequent line segment
# grad - logical; if TRUE the line is coloured along a brown-to-light-green gradient based on segment length
# e - +ve numeric, smaller than 'length'; recursion cut-off -- drawing stops once segments shrink below it
# length - +ve numeric; length of the initial line segment
# lwd - +ve numeric; line width
#
# Returns: -
#
########
### Begin with a new, empty plotting frame
frame()
### Fixed launch point (bottom centre of the frame) and initial direction (straight up)
start_pos <- c(.5, 0)
start_angle <- pi/2
### Hand the actual recursive drawing off to the supplied function
fun(start_pos, start_angle, length,
e=e, grad=grad, lwd=lwd, adj_len=adj_len, adj_ang=adj_ang)
}
# Challenge question F
Challenge_F <- function() {
### Generates and plots a series of fractal shapes resembling organisms based off different parameters of the draw_shape function, and returns a statement.
#
# Arguments: -
#
# Returns: character string; statement explaining the effects of decreasing the input e
#
# NOTE: relies on draw_shape() (above) and the tree(), fern() and fern2()
# drawing functions defined elsewhere in this file.
# Each call below is draw_shape(drawing fun, adj_len, adj_ang, ...).
#
########
### Clear graphics
graphics.off()
### Open new, wide 2x4 plot, and plots all 8 shapes
dev.new(width = 9, height = 6)
par(mfcol=c(2,4))
# Conifer (NOTE(review): the on-plot title keeps the original "Connifer" spelling)
par(mfg=c(1,1))
draw_shape(tree, c(0.61,0.61), pi/4, length=.14, grad=TRUE, lwd=5)
title(main="Connifer", line=-6, cex.main=2)
# Bush
par(mfg=c(1,2))
draw_shape(tree, c(0.64,0.64), pi/2.5, length=.14, grad=TRUE, lwd=3)
title(main="Bush", line=-6, cex.main=2)
# Shrimp tail
par(mfg=c(1,3))
draw_shape(tree, c(0.78,0.48), pi/6, length=.13, grad=FALSE, lwd=.4)
title(main="Shrimp Tail", line=-6, cex.main=2)
# Trilobite
par(mfg=c(1,4))
draw_shape(tree, c(0.94,0.31), pi/8, length=.1, grad=FALSE, lwd=.2)
title(main="Trilobite", line=-6, cex.main=2, adj=.1)
# Vines
par(mfg=c(2,1))
draw_shape(tree, c(0.29,0.83), pi/2.5, length=.23, grad=TRUE, lwd=2)
title(main="Vines", line=-6, cex.main=2, adj=.7)
# Wheat
par(mfg=c(2,2))
draw_shape(fern, c(0.35,0.85), pi/8, length=.07, grad=FALSE, lwd=.4)
title(main="Wheat", line=-6, cex.main=2)
# Italian Cyprus Tree
par(mfg=c(2,3))
draw_shape(fern2, c(0.38, 0.88), pi / 8, length=.06, grad=TRUE, lwd=3)
title(main="Italian Cyprus Tree", line=-6, cex.main=2)
# Christmas Tree
par(mfg=c(2,4))
draw_shape(fern2, c(0.43,0.85), pi/2, length=.08, grad=TRUE, lwd=3)
title(main="Merry Christmas", line=-6, cex.main=2)
return("Time taken and level of detail added seems to increase exponentially as e is lowered. This is because as e decreases, additional levels of detail (sub-pinnules of the fern if you will) are added, and the surface area upon which they're added is exponentially larger than at any of the previous levels.")
}
## Return the html_dependency metadata for the dashEditorComponents package:
## two JavaScript bundles (the minified script and its source map) and one
## stylesheet, each served from the package's "deps" directory.
.dashEditorComponents_js_metadata <- function() {
  # Local constructor for one html_dependency entry; only the fields that
  # vary between the three dependencies are parameterised.
  make_dep <- function(version, script, stylesheet) {
    structure(
      list(name = "dash_editor_components",
           version = version,
           src = list(href = NULL, file = "deps"),
           meta = NULL,
           script = script,
           stylesheet = stylesheet,
           head = NULL,
           attachment = NULL,
           package = "dashEditorComponents",
           all_files = FALSE),
      class = "html_dependency")
  }
  list(
    dash_editor_components = make_dep("0.0.2", "dash_editor_components.min.js", NULL),
    dash_editor_components = make_dep("0.0.2", "dash_editor_components.min.js.map", NULL),
    dash_editor_components = make_dep("2", NULL, "editor.css")
  )
}
| /R/internal.R | permissive | waralex/dash-editor-components | R | false | false | 1,043 | r | .dashEditorComponents_js_metadata <- function() {
deps_metadata <- list(`dash_editor_components` = structure(list(name = "dash_editor_components",
version = "0.0.2", src = list(href = NULL,
file = "deps"), meta = NULL,
script = 'dash_editor_components.min.js',
stylesheet = NULL, head = NULL, attachment = NULL, package = "dashEditorComponents",
all_files = FALSE), class = "html_dependency"),
`dash_editor_components` = structure(list(name = "dash_editor_components",
version = "0.0.2", src = list(href = NULL,
file = "deps"), meta = NULL,
script = 'dash_editor_components.min.js.map',
stylesheet = NULL, head = NULL, attachment = NULL, package = "dashEditorComponents",
all_files = FALSE), class = "html_dependency"),
`dash_editor_components` = structure(list(name = "dash_editor_components",
version = "2", src = list(href = NULL,
file = "deps"), meta = NULL,
script = NULL,
stylesheet = 'editor.css', head = NULL, attachment = NULL, package = "dashEditorComponents",
all_files = FALSE), class = "html_dependency"))
return(deps_metadata)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errors_vs_ntaxa.R
\name{errors_vs_ntaxa}
\alias{errors_vs_ntaxa}
\title{Errors_vs_ntaxa}
\usage{
errors_vs_ntaxa(n_replicates = 2)
}
\arguments{
\item{n_replicates}{number of replicates}
}
\value{
nothing
}
\description{
Errors_vs_ntaxa
}
\author{
Giovanni Laudanno
}
| /man/errors_vs_ntaxa.Rd | no_license | Giappo/PirouetteExamples | R | false | true | 346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errors_vs_ntaxa.R
\name{errors_vs_ntaxa}
\alias{errors_vs_ntaxa}
\title{Errors_vs_ntaxa}
\usage{
errors_vs_ntaxa(n_replicates = 2)
}
\arguments{
\item{n_replicates}{number of replicates}
}
\value{
nothing
}
\description{
Errors_vs_ntaxa
}
\author{
Giovanni Laudanno
}
|
"For the exercise I assume that the matrix supplied is always invertible.
That means it is always a square numerical matrix whose determinant != 0.
Because of this assumption, I will not be checking the matrix before using it."
## Create a "CacheMatrix": a set of accessor functions wrapping a matrix `x`
## and a cached value of its inverse.
##
## The supplied matrix is assumed to be invertible (square, numeric,
## determinant != 0), so no validity checks are performed before use.
##
## Returns a list of four functions:
##   get()         - return the stored matrix
##   set(y)        - replace the stored matrix with `y` and clear the cache
##   getinverse()  - return the cached inverse (NULL until computed)
##   setinverse(i) - cache `i` as the inverse
##
## The accessors read/write `x` and `inverse` via lexical scoping: `<<-`
## resolves them in this function's defining environment rather than the
## caller's, so every CacheMatrix carries its own private state.
## (The original version embedded explanatory bare string literals as
## pseudo-comments; those were evaluated-and-discarded expressions and have
## been converted to real comments.)
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # cache starts empty
  # Matrix accessors.
  get <- function() x
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse; it must
    # be recomputed after set() is called.
    x <<- y
    inverse <<- NULL
  }
  # Inverse-cache accessors.
  setinverse <- function(i) inverse <<- i
  getinverse <- function() inverse
  # Return the CacheMatrix interface.
  list(get = get,
       set = set,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the matrix held by the CacheMatrix `x`
## (as created by makeCacheMatrix).
##
## The cached inverse is returned when available; otherwise the inverse is
## computed with solve() (extra arguments are forwarded via `...`) and stored
## back into the cache so that subsequent calls skip the computation.
## (The original embedded a bare string literal as a pseudo-comment inside the
## if-block; that was an evaluated-and-discarded expression and has been
## converted to a real comment.)
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    # Cache hit: solve() is not called again.
    return(inverse)
  }
  # Cache miss: compute the inverse and memoise it on the CacheMatrix.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | vmlm/ProgrammingAssignment2 | R | false | false | 2,323 | r | "For the exercise I assume that the matrix supplied is always invertible.
That means it is always a square numerical matrix whose determinant != 0.
Because of this assumption, I will not be checking the matrix before using it."
## get/set a matrix and its inverse.
makeCacheMatrix <- function(x = matrix()) {
"The function returns a list representing a CacheMatrix structure. The
variables x (the matrix) and inverse (the matrix inverse) are defined, but
aren't included in the returned list. This is because they're meant to be
modified through the get/set functions, not direct manipulation."
inverse <- NULL #inverse starts off as NULL
# defining the matrix get and set functions
get <- function() x # simply returns the matrix
set <- function(y) {
"Changes the value of x to the given matrix y.
Also reverts the value of inverse to NULL. The inverse will have to be
recalculated after executing the set function."
x <<- y
inverse <<- NULL
}
#defining the inverse get and set functions
setinverse <- function(i) inverse <<- i
getinverse <- function() inverse
"NOTE: It's important to note that the get/set functions are taking
advantage of R's lexical scoping. In other words, the get/set
functions can access the inverse and x variables because R considers their
DEFINING environment (in this case the makeCacheMatrix function definition)
rather than their calling environment (the global environment, if the
function is called from the console) when searching for the associated
values."
#return a CacheMatrix
list(get=get,
set=set,
setinverse=setinverse,
getinverse=getinverse)
}
## Return a matrix that is the inverse of the 'x' CacheMatrix
cacheSolve <- function(x, ...) {
inverse <- x$getinverse() #get the value of inverse
if (is.null(inverse)) {
"If the value DOESN'T exist, calculate it using the solve function.
This implies that if the value exists, the solve function will not be
called unnecessarily."
inverse <- solve(x$get(), ...) #calculate the value...
x$setinverse(inverse) #... and cache it in the CacheMatrix.
}
inverse #return inverse
}
|
## plot3.R -- exploratory plot of the three energy sub-metering series from
## "household_power_consumption.txt" (';'-separated, '?' marks missing
## values), rendered as overlaid line plots and saved to plot3.png.
# Read only the rows of interest: skip the header plus all earlier records,
# then take 2880 minute-level rows (2880 = 2 * 1440, i.e. a two-day window --
# presumably 2007-02-01/02; TODO confirm against the raw file's Date column).
load_data <- read.table(file = "household_power_consumption.txt",
sep = ";",
skip = 66637,
nrows = 2880,
na.strings= "?")
# The header row was skipped above, so recover the column names from the
# file's first row and re-attach them.
dataNames <- colnames(read.table("household_power_consumption.txt",
sep = ";",
nrow = 1,
header = TRUE))
names(load_data) <- dataNames
str(load_data)
# Combine the Date and Time columns and parse them into POSIXlt timestamps
# to serve as the x axis.
dateTime <- paste(load_data$Date, load_data$Time)
weekDay <- strptime(dateTime, format ='%d/%m/%Y %H:%M:%S')
str(weekDay)
# Render to a 480x480 PNG: Sub_metering_1 in black, 2 in red, 3 in blue.
png("plot3.png", width=480, height=480)
plot(weekDay, load_data$Sub_metering_1,
type="l",
xlab="",
ylab="Energy sub metering")
lines(weekDay, load_data$Sub_metering_2, col="red")
lines(weekDay, load_data$Sub_metering_3, col="blue")
# Legend matching the line colours of the three series.
legend("topright",
lty=1,
col=c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
| /plot3.R | no_license | d4t4/ExData_Plotting1 | R | false | false | 789 | r | load_data <- read.table(file = "household_power_consumption.txt",
sep = ";",
skip = 66637,
nrows = 2880,
na.strings= "?")
dataNames <- colnames(read.table("household_power_consumption.txt",
sep = ";",
nrow = 1,
header = TRUE))
names(load_data) <- dataNames
str(load_data)
dateTime <- paste(load_data$Date, load_data$Time)
weekDay <- strptime(dateTime, format ='%d/%m/%Y %H:%M:%S')
str(weekDay)
png("plot3.png", width=480, height=480)
plot(weekDay, load_data$Sub_metering_1,
type="l",
xlab="",
ylab="Energy sub metering")
lines(weekDay, load_data$Sub_metering_2, col="red")
lines(weekDay, load_data$Sub_metering_3, col="blue")
legend("topright",
lty=1,
col=c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
# Working directory for the Sentinel surveillance project.
# NOTE(review): hard-coded, machine-specific absolute path -- this script only
# runs on the original author's machine; consider a relative path or a config
# file instead.
setwd("/media/herimanitra/DONNEES/IPM_sentinelle/sentinel_hrmntr 291115/Sentinel")
# Whether to pull data directly from the server (TRUE) or run on local copies
# for debugging and development, and whether to write pulled data to disk:
remote_server <- TRUE
writing_to_disk <- TRUE
# Whether to run the report (TRUE) or to run shiny:
reporting <- TRUE
# Load required packages:
source("libraries.R")
# source("var_conversion.R")
if ( exists("PaluConf")==F ) #to speed up things
{
if ( remote_server==TRUE ) {
# cat("connection to the server and read data using dplyr to select views and tables:\n")
sentinel <- src_postgres(dbname="sentinel",
host = "172.16.0.230",
user = "cnx_florian",
password = "sigflorianipm")
data_iri_env<-src_postgres(dbname="data_iri",
host="172.16.0.230",
port=5432,
user="cnx_florian",
password="sigflorianipm")
# load data
PaluConf=fread("data/PaluConf.csv")
Consultations=fread("data/Consultations.csv")
SyndF=fread("data/SyndF.csv")
palu_autoch=fread("data/palu_autoch.csv")
Diarrh=fread("data/Diarrh.csv")
Diarrh_feb=fread("data/Diarrh_feb.csv")
ili=fread("data/ili.csv")
pfa=fread("data/pfa.csv")
arbosusp=fread("data/arbosusp.csv")
#tdr_eff=fread("data/tdr_eff.csv")
################################################################"
max_date=max(as.Date(PaluConf$deb_sem,origin="1970-01-01"))
#should accelerate extraction:
PaluConf_tmp= tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_paluconf_format"," WHERE deb_sem>=",
max_date
))
if (dim(PaluConf_tmp)[2]>0)
{
#transform into data.table:
PaluConf_tmp= PaluConf_tmp %>% data.frame() %>% data.table()
#PaluConf_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(PaluConf_tmp,"data/PaluConf_tmp.csv")
PaluConf_tmp=fread("data/PaluConf_tmp.csv")
PaluConf=PaluConf[deb_sem<max_date,]
PaluConf=rbind(PaluConf,PaluConf_tmp)
PaluConf[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(PaluConf,-deb_sem)
# #conversion of variables:
# var_conv(PaluConf,PaluConf_tmp)
# #rbind 02 dataframe:
# PaluConf=PaluConf[deb_sem<max_date,]
# PaluConf=(rbind(PaluConf,PaluConf_tmp))
# setorder(PaluConf,-deb_sem)
}
######################################
max_date=max(as.Date(Consultations$deb_sem,origin="1970-01-01"))
Consultations_tmp= tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_nxconslttotal_format",
" WHERE deb_sem>=",max_date))
if (dim(Consultations_tmp)[2]>0)
{
#transform into data.table:
Consultations_tmp= Consultations_tmp %>% data.frame() %>% data.table()
#Consultations_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Consultations_tmp,"data/Consultations_tmp.csv")
Consultations_tmp=fread("data/Consultations_tmp.csv")
Consultations=Consultations[deb_sem<max_date,]
Consultations=rbind(Consultations,Consultations_tmp)
Consultations[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Consultations,-deb_sem)
# #conversion of variables:
# var_conv(Consultations,Consultations_tmp)
# #rbind 02 dataframe:
# Consultations=Consultations[deb_sem<max_date,]
# Consultations=(rbind(Consultations,Consultations_tmp))
# setorder(Consultations,-deb_sem)
}
############################################################
max_date=max(as.Date(SyndF$deb_sem,origin="1970-01-01"))
SyndF_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_syndf_format",
" WHERE deb_sem>=",max_date))
if ( dim(SyndF_tmp)[2]>0 )
{
SyndF_tmp= SyndF_tmp %>% data.frame() %>% data.table()
#SyndF_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(SyndF_tmp,"data/SyndF_tmp.csv")
SyndF_tmp=fread("data/SyndF_tmp.csv")
SyndF=SyndF[deb_sem<max_date,]
SyndF=rbind(SyndF,SyndF_tmp)
SyndF[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(SyndF,-deb_sem)
# #transform into data.table:
# SyndF_tmp= SyndF_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(SyndF,SyndF_tmp)
# #rbind 02 dataframe:
# SyndF=SyndF[deb_sem<max_date,]
# SyndF=(rbind(SyndF,SyndF_tmp))
# setorder(SyndF,-deb_sem)
}
################################################################
max_date=max(as.Date(Diarrh$deb_sem,origin="1970-01-01"))
Diarrh_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_diarrh_format",
" WHERE deb_sem>=",max_date))
if ( dim(Diarrh_tmp)[2]>0 )
{
Diarrh_tmp= Diarrh_tmp %>% data.frame() %>% data.table()
#Diarrh_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Diarrh_tmp,"data/Diarrh_tmp.csv")
Diarrh_tmp=fread("data/Diarrh_tmp.csv")
Diarrh=Diarrh[deb_sem<max_date,]
Diarrh=rbind(Diarrh,Diarrh_tmp)
Diarrh[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Diarrh,-deb_sem)
# #transform into data.table:
# Diarrh_tmp= Diarrh_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(Diarrh,Diarrh_tmp)
# #rbind 02 dataframe:
# Diarrh=Diarrh[deb_sem<max_date,]
# Diarrh=(rbind(Diarrh,Diarrh_tmp))
# setorder(Diarrh,-deb_sem)
}
##########################################################"
max_date= max(as.Date(Diarrh_feb$deb_sem,origin="1970-01-01"))
Diarrh_feb_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_diarrhfeb_format",
" WHERE deb_sem>=",max_date))
if ( dim(Diarrh_feb_tmp)[2]>0 )
{
Diarrh_feb_tmp= Diarrh_feb_tmp %>% data.frame() %>% data.table()
#Diarrh_feb_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Diarrh_feb_tmp,"data/Diarrh_feb_tmp.csv")
Diarrh_feb_tmp=fread("data/Diarrh_feb_tmp.csv")
Diarrh_feb=Diarrh_feb[deb_sem<max_date,]
Diarrh_feb=rbind(Diarrh_feb,Diarrh_feb_tmp)
Diarrh_feb[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Diarrh_feb,-deb_sem)
# #transform into data.table:
# Diarrh_feb_tmp= Diarrh_feb_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(Diarrh_feb,Diarrh_feb_tmp)
# #remove old obs. and rbind 02 dataframe:
# Diarrh_feb=Diarrh_feb[deb_sem<max_date,]
# Diarrh_feb=unique(rbind(Diarrh_feb,Diarrh_feb_tmp))
# setorder(Diarrh_feb,-deb_sem)
}
max_date=max(as.Date(arbosusp$deb_sem,origin="1970-01-01"))
# Arbosusp
arbosusp_tmp=tbl(sentinel,build_sql("SELECT * FROM ","crosstab_arbosusp_format",
" WHERE deb_sem>=",max_date))
if (dim(arbosusp_tmp)[2]>0)
{
arbosusp_tmp= arbosusp_tmp %>% data.frame() %>% data.table()
#arbosusp_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(arbosusp_tmp,"data/arbosusp_tmp.csv")
arbosusp_tmp=fread("data/arbosusp_tmp.csv")
arbosusp=arbosusp[deb_sem<max_date,]
arbosusp=rbind(arbosusp,arbosusp_tmp)
arbosusp[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(arbosusp,-deb_sem)
}
#
# max_date = max(tdr_eff$deb_sem)
# tdr_eff_tmp= tbl(sentinel,
# build_sql('SELECT "Date" AS "deb_sem","SyndF","TestPalu","Centre2" AS "sites","Annee","Semaine","ArboSusp","GrippSusp","AutrVirResp","NxConsltTotal" FROM ',
# "vue_csb_sms_centre_format", " WHERE 'Date'>=",max_date))
#
# if (dim(tdr_eff_tmp)[2]>0)
# {
# #conversion of dataframe:
# tdr_eff_tmp = tdr_eff_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(tdr_eff,tdr_eff_tmp)
# #rbind 02 dataframe:
# tdr_eff=tdr_eff[deb_sem<max_date,]
# tdr_eff=(rbind(tdr_eff,tdr_eff_tmp))
# setorder(tdr_eff,-deb_sem)
# }
max_date = max(as.Date(ili$deb_sem,origin="1970-01-01"))
ili_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_grippsusp_autrvirresp_format",
" WHERE deb_sem>=",max_date))
if (dim(ili_tmp)[2]>0)
{
ili_tmp= ili_tmp %>% data.frame() %>% data.table()
#ili_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(ili_tmp,"data/ili_tmp.csv")
ili_tmp=fread("data/ili_tmp.csv")
ili=ili[deb_sem<max_date,]
ili=rbind(ili,ili_tmp)
ili[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(ili,-deb_sem)
}
#cat('query of mild\n')
mild<-fread("data/mild_export.csv")
#empilement des HFi:
hfi=fread("data/hfi.csv")
max_date=max(as.Date(hfi$deb_sem,origin="1970-01-01"))
conn_hfi <- src_postgres(dbname="data_iri",
host = "172.16.0.230",
user = "cnx_user",
password = "sig0000ipm")
hfi_tmp= tbl(conn_hfi,
build_sql("SELECT * FROM ","crosstab_iri_caid",
" WHERE deb_sem>=",max_date))
if ( dim(hfi_tmp)[2]>0 )
{
hfi_tmp= hfi_tmp %>% data.frame() %>% data.table()
#hfi_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(hfi_tmp,"data/hfi_tmp.csv")
hfi_tmp=fread("data/hfi_tmp.csv")
hfi=hfi[deb_sem<max_date,]
hfi=rbind(hfi,hfi_tmp)
hfi[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(hfi,-deb_sem)
}
########################Datasets that are part of
########################the reporting set#########
#palu autochtone:
if (reporting ==T)
{
max_date=max(as.Date(palu_autoch$deb_sem,origin="1970-01-01"))
palu_autoch_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_autoch_format"," WHERE deb_sem>=",
max_date))
if (dim(palu_autoch_tmp)[2]>0 )
{
palu_autoch_tmp= palu_autoch_tmp %>% data.frame() %>% data.table()
#palu_autoch_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(palu_autoch_tmp,"data/palu_autoch_tmp.csv")
palu_autoch_tmp=fread("data/palu_autoch_tmp.csv")
palu_autoch=palu_autoch[deb_sem<max_date,]
palu_autoch=rbind(palu_autoch,palu_autoch_tmp)
palu_autoch[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(palu_autoch,-deb_sem)
}
##################################################################
#Paralysie flasque aigue
max_date=max(as.Date(pfa$deb_sem,origin="1970-01-01"))
pfa_tmp= tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_pfa_format",
" WHERE deb_sem>=",max_date))
if (dim(pfa_tmp)[2]>0)
{
pfa_tmp= pfa_tmp %>% data.frame() %>% data.table()
#pfa_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(pfa_tmp,"data/pfa_tmp.csv")
pfa_tmp=fread("data/pfa_tmp.csv")
pfa=pfa[deb_sem<max_date,]
pfa=rbind(pfa,pfa_tmp)
pfa[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(pfa,-deb_sem)
}
}
if (writing_to_disk==T )
{
#need conversion here---- NOT ACTUALLY
#cat('writing data locally...')
fwrite(PaluConf,"data/PaluConf.csv",sep=";")
fwrite(Consultations,"data/Consultations.csv",sep=";")
fwrite(SyndF,"data/SyndF.csv",sep=";")
fwrite(Diarrh,"data/Diarrh.csv",sep=";")
fwrite(Diarrh_feb,"data/Diarrh_feb.csv",sep=";")
#fwrite(tdr_eff,"data/tdr_eff.csv",sep=";")
fwrite(ili,"data/ili.csv",sep=";")
fwrite(pfa,"data/pfa.csv",sep=";")
fwrite(palu_autoch,"data/palu_autoch.csv",sep=";")
fwrite(hfi,"data/hfi.csv",sep=";")
fwrite(arbosusp,"data/arbosusp.csv",sep=";")
#cat('DONE\n')
}
##cat('tables in sentinel are:',src_tbls(sentinel),"\n")
} else {
setwd('/media/herimanitra/DONNEES/IPM_sentinelle/sentinel_hrmntr 291115/Sentinel')
PaluConf=fread("data/PaluConf.csv")
Consultations=fread("data/Consultations.csv")
SyndF=fread("data/SyndF.csv")
#SyndF[,deb_sem:=as.character((as.Date(deb_sem,origin="1970-01-01")))]
Diarrh=fread("data/Diarrh.csv")
Diarrh_feb=fread("data/Diarrh_feb.csv")
lst=fread("data/lst.csv")
ndvi=fread("data/ndvi.csv")
pmm=fread("data/pmm.csv")
caid=fread("data/caid.csv")
mild=fread("data/mild_export.csv")
ili=fread("data/ili.csv")
#ili[,deb_sem:=as.character((as.Date(deb_sem,origin="1970-01-01")))]
pfa=fread("data/pfa.csv")
palu_autoch=fread("data/palu_autoch.csv")
tdr_eff=fread("data/tdr_eff.csv")
hfi=fread("data/hfi.csv")
}
#cat('query of lat/long of sites...')
sentinel_latlong=fread("data/sentinel.csv")
setnames(sentinel_latlong,"CODE","sites")
sentinel_latlong[,sites:=tolower(sites)]
} else {
#cat("No need to pull data anymore!\n")
}
| /import_data_everynight.R | no_license | RanaivosonHerimanitra/Sentinel-windows | R | false | false | 13,572 | r | setwd("/media/herimanitra/DONNEES/IPM_sentinelle/sentinel_hrmntr 291115/Sentinel")
# Whether to pull data directly from the server (TRUE) or to run from the
# local CSV copies for debugging and development purposes.
remote_server <- TRUE
# Whether to write the refreshed datasets back to disk as CSV snapshots.
writing_to_disk <- TRUE
# Whether to run the report (TRUE) or shiny (FALSE): the report path
# additionally refreshes palu_autoch and pfa below.
# NOTE: TRUE/FALSE are used instead of T/F because T and F are ordinary
# variables that can be reassigned.
reporting <- TRUE
# Load required packages:
source("libraries.R") # source("var_conversion.R")
if ( exists("PaluConf")==F ) #to speed up things
{
if ( remote_server==TRUE ) {
# cat("connection to the server and read data using dplyr to select views and tables:\n")
sentinel <- src_postgres(dbname="sentinel",
host = "172.16.0.230",
user = "cnx_florian",
password = "sigflorianipm")
data_iri_env<-src_postgres(dbname="data_iri",
host="172.16.0.230",
port=5432,
user="cnx_florian",
password="sigflorianipm")
# load data
PaluConf=fread("data/PaluConf.csv")
Consultations=fread("data/Consultations.csv")
SyndF=fread("data/SyndF.csv")
palu_autoch=fread("data/palu_autoch.csv")
Diarrh=fread("data/Diarrh.csv")
Diarrh_feb=fread("data/Diarrh_feb.csv")
ili=fread("data/ili.csv")
pfa=fread("data/pfa.csv")
arbosusp=fread("data/arbosusp.csv")
#tdr_eff=fread("data/tdr_eff.csv")
################################################################"
## Refresh PaluConf with any weeks newer than the locally cached table ----
last_cached <- max(as.Date(PaluConf$deb_sem, origin = "1970-01-01"))
# Restricting the query to deb_sem >= last_cached keeps the extraction fast.
new_rows <- tbl(
  sentinel,
  build_sql(
    "SELECT * FROM ", "crosstab_paluconf_format",
    " WHERE deb_sem>=", last_cached
  )
)
if (dim(new_rows)[2] > 0) {
  # Materialise the lazy query, then round-trip through CSV so that the
  # column types match those of the cached table read with fread().
  new_rows <- new_rows %>% data.frame() %>% data.table()
  fwrite(new_rows, "data/PaluConf_tmp.csv")
  new_rows <- fread("data/PaluConf_tmp.csv")
  # Drop cached rows overlapping the refreshed period, append the fresh
  # ones, then restore the Date type and the descending-week ordering.
  PaluConf <- PaluConf[deb_sem < last_cached, ]
  PaluConf <- rbind(PaluConf, new_rows)
  PaluConf[, deb_sem := as.Date(deb_sem, origin = "1970-01-01")]
  setorder(PaluConf, -deb_sem)
}
######################################
max_date=max(as.Date(Consultations$deb_sem,origin="1970-01-01"))
Consultations_tmp= tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_nxconslttotal_format",
" WHERE deb_sem>=",max_date))
if (dim(Consultations_tmp)[2]>0)
{
#transform into data.table:
Consultations_tmp= Consultations_tmp %>% data.frame() %>% data.table()
#Consultations_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Consultations_tmp,"data/Consultations_tmp.csv")
Consultations_tmp=fread("data/Consultations_tmp.csv")
Consultations=Consultations[deb_sem<max_date,]
Consultations=rbind(Consultations,Consultations_tmp)
Consultations[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Consultations,-deb_sem)
# #conversion of variables:
# var_conv(Consultations,Consultations_tmp)
# #rbind 02 dataframe:
# Consultations=Consultations[deb_sem<max_date,]
# Consultations=(rbind(Consultations,Consultations_tmp))
# setorder(Consultations,-deb_sem)
}
############################################################
max_date=max(as.Date(SyndF$deb_sem,origin="1970-01-01"))
SyndF_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_syndf_format",
" WHERE deb_sem>=",max_date))
if ( dim(SyndF_tmp)[2]>0 )
{
SyndF_tmp= SyndF_tmp %>% data.frame() %>% data.table()
#SyndF_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(SyndF_tmp,"data/SyndF_tmp.csv")
SyndF_tmp=fread("data/SyndF_tmp.csv")
SyndF=SyndF[deb_sem<max_date,]
SyndF=rbind(SyndF,SyndF_tmp)
SyndF[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(SyndF,-deb_sem)
# #transform into data.table:
# SyndF_tmp= SyndF_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(SyndF,SyndF_tmp)
# #rbind 02 dataframe:
# SyndF=SyndF[deb_sem<max_date,]
# SyndF=(rbind(SyndF,SyndF_tmp))
# setorder(SyndF,-deb_sem)
}
################################################################
max_date=max(as.Date(Diarrh$deb_sem,origin="1970-01-01"))
Diarrh_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_diarrh_format",
" WHERE deb_sem>=",max_date))
if ( dim(Diarrh_tmp)[2]>0 )
{
Diarrh_tmp= Diarrh_tmp %>% data.frame() %>% data.table()
#Diarrh_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Diarrh_tmp,"data/Diarrh_tmp.csv")
Diarrh_tmp=fread("data/Diarrh_tmp.csv")
Diarrh=Diarrh[deb_sem<max_date,]
Diarrh=rbind(Diarrh,Diarrh_tmp)
Diarrh[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Diarrh,-deb_sem)
# #transform into data.table:
# Diarrh_tmp= Diarrh_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(Diarrh,Diarrh_tmp)
# #rbind 02 dataframe:
# Diarrh=Diarrh[deb_sem<max_date,]
# Diarrh=(rbind(Diarrh,Diarrh_tmp))
# setorder(Diarrh,-deb_sem)
}
##########################################################"
max_date= max(as.Date(Diarrh_feb$deb_sem,origin="1970-01-01"))
Diarrh_feb_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_diarrhfeb_format",
" WHERE deb_sem>=",max_date))
if ( dim(Diarrh_feb_tmp)[2]>0 )
{
Diarrh_feb_tmp= Diarrh_feb_tmp %>% data.frame() %>% data.table()
#Diarrh_feb_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(Diarrh_feb_tmp,"data/Diarrh_feb_tmp.csv")
Diarrh_feb_tmp=fread("data/Diarrh_feb_tmp.csv")
Diarrh_feb=Diarrh_feb[deb_sem<max_date,]
Diarrh_feb=rbind(Diarrh_feb,Diarrh_feb_tmp)
Diarrh_feb[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(Diarrh_feb,-deb_sem)
# #transform into data.table:
# Diarrh_feb_tmp= Diarrh_feb_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(Diarrh_feb,Diarrh_feb_tmp)
# #remove old obs. and rbind 02 dataframe:
# Diarrh_feb=Diarrh_feb[deb_sem<max_date,]
# Diarrh_feb=unique(rbind(Diarrh_feb,Diarrh_feb_tmp))
# setorder(Diarrh_feb,-deb_sem)
}
max_date=max(as.Date(arbosusp$deb_sem,origin="1970-01-01"))
# Arbosusp
arbosusp_tmp=tbl(sentinel,build_sql("SELECT * FROM ","crosstab_arbosusp_format",
" WHERE deb_sem>=",max_date))
if (dim(arbosusp_tmp)[2]>0)
{
arbosusp_tmp= arbosusp_tmp %>% data.frame() %>% data.table()
#arbosusp_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(arbosusp_tmp,"data/arbosusp_tmp.csv")
arbosusp_tmp=fread("data/arbosusp_tmp.csv")
arbosusp=arbosusp[deb_sem<max_date,]
arbosusp=rbind(arbosusp,arbosusp_tmp)
arbosusp[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(arbosusp,-deb_sem)
}
#
# max_date = max(tdr_eff$deb_sem)
# tdr_eff_tmp= tbl(sentinel,
# build_sql('SELECT "Date" AS "deb_sem","SyndF","TestPalu","Centre2" AS "sites","Annee","Semaine","ArboSusp","GrippSusp","AutrVirResp","NxConsltTotal" FROM ',
# "vue_csb_sms_centre_format", " WHERE 'Date'>=",max_date))
#
# if (dim(tdr_eff_tmp)[2]>0)
# {
# #conversion of dataframe:
# tdr_eff_tmp = tdr_eff_tmp %>% data.frame() %>% data.table()
# #conversion of variables:
# var_conv(tdr_eff,tdr_eff_tmp)
# #rbind 02 dataframe:
# tdr_eff=tdr_eff[deb_sem<max_date,]
# tdr_eff=(rbind(tdr_eff,tdr_eff_tmp))
# setorder(tdr_eff,-deb_sem)
# }
max_date = max(as.Date(ili$deb_sem,origin="1970-01-01"))
ili_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_grippsusp_autrvirresp_format",
" WHERE deb_sem>=",max_date))
if (dim(ili_tmp)[2]>0)
{
ili_tmp= ili_tmp %>% data.frame() %>% data.table()
#ili_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(ili_tmp,"data/ili_tmp.csv")
ili_tmp=fread("data/ili_tmp.csv")
ili=ili[deb_sem<max_date,]
ili=rbind(ili,ili_tmp)
ili[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(ili,-deb_sem)
}
#cat('query of mild\n')
mild<-fread("data/mild_export.csv")
#empilement des HFi:
hfi=fread("data/hfi.csv")
max_date=max(as.Date(hfi$deb_sem,origin="1970-01-01"))
conn_hfi <- src_postgres(dbname="data_iri",
host = "172.16.0.230",
user = "cnx_user",
password = "sig0000ipm")
hfi_tmp= tbl(conn_hfi,
build_sql("SELECT * FROM ","crosstab_iri_caid",
" WHERE deb_sem>=",max_date))
if ( dim(hfi_tmp)[2]>0 )
{
hfi_tmp= hfi_tmp %>% data.frame() %>% data.table()
#hfi_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(hfi_tmp,"data/hfi_tmp.csv")
hfi_tmp=fread("data/hfi_tmp.csv")
hfi=hfi[deb_sem<max_date,]
hfi=rbind(hfi,hfi_tmp)
hfi[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(hfi,-deb_sem)
}
########################Datasets that are part of
########################the reporting set#########
#palu autochtone:
if (reporting ==T)
{
max_date=max(as.Date(palu_autoch$deb_sem,origin="1970-01-01"))
palu_autoch_tmp=tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_autoch_format"," WHERE deb_sem>=",
max_date))
if (dim(palu_autoch_tmp)[2]>0 )
{
palu_autoch_tmp= palu_autoch_tmp %>% data.frame() %>% data.table()
#palu_autoch_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(palu_autoch_tmp,"data/palu_autoch_tmp.csv")
palu_autoch_tmp=fread("data/palu_autoch_tmp.csv")
palu_autoch=palu_autoch[deb_sem<max_date,]
palu_autoch=rbind(palu_autoch,palu_autoch_tmp)
palu_autoch[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(palu_autoch,-deb_sem)
}
##################################################################
#Paralysie flasque aigue
max_date=max(as.Date(pfa$deb_sem,origin="1970-01-01"))
pfa_tmp= tbl(sentinel,
build_sql("SELECT * FROM ",
"crosstab_pfa_format",
" WHERE deb_sem>=",max_date))
if (dim(pfa_tmp)[2]>0)
{
pfa_tmp= pfa_tmp %>% data.frame() %>% data.table()
#pfa_tmp[,deb_sem:=as.character(deb_sem)]
fwrite(pfa_tmp,"data/pfa_tmp.csv")
pfa_tmp=fread("data/pfa_tmp.csv")
pfa=pfa[deb_sem<max_date,]
pfa=rbind(pfa,pfa_tmp)
pfa[,deb_sem:=as.Date(deb_sem,origin="1970-01-01")]
setorder(pfa,-deb_sem)
}
}
# Persist every refreshed dataset locally so that later runs (and the
# remote_server == FALSE branch) can start from these CSV snapshots.
# isTRUE() is safer than `== T`: T is an ordinary variable and can be
# reassigned, while isTRUE() also handles NA/zero-length values.
if (isTRUE(writing_to_disk)) {
  #cat('writing data locally...')
  fwrite(PaluConf,"data/PaluConf.csv",sep=";")
  fwrite(Consultations,"data/Consultations.csv",sep=";")
  fwrite(SyndF,"data/SyndF.csv",sep=";")
  fwrite(Diarrh,"data/Diarrh.csv",sep=";")
  fwrite(Diarrh_feb,"data/Diarrh_feb.csv",sep=";")
  #fwrite(tdr_eff,"data/tdr_eff.csv",sep=";")
  fwrite(ili,"data/ili.csv",sep=";")
  fwrite(pfa,"data/pfa.csv",sep=";")
  fwrite(palu_autoch,"data/palu_autoch.csv",sep=";")
  fwrite(hfi,"data/hfi.csv",sep=";")
  fwrite(arbosusp,"data/arbosusp.csv",sep=";")
  #cat('DONE\n')
}
##cat('tables in sentinel are:',src_tbls(sentinel),"\n")
} else {
  # Offline/debug path (remote_server == FALSE): read every dataset from
  # the local CSV snapshots instead of querying the PostgreSQL server.
  setwd('/media/herimanitra/DONNEES/IPM_sentinelle/sentinel_hrmntr 291115/Sentinel')
  PaluConf=fread("data/PaluConf.csv")
  Consultations=fread("data/Consultations.csv")
  SyndF=fread("data/SyndF.csv")
  #SyndF[,deb_sem:=as.character((as.Date(deb_sem,origin="1970-01-01")))]
  Diarrh=fread("data/Diarrh.csv")
  Diarrh_feb=fread("data/Diarrh_feb.csv")
  # lst/ndvi/pmm/caid: additional local series only loaded in this branch
  # (presumably environmental/satellite covariates -- confirm with owner).
  lst=fread("data/lst.csv")
  ndvi=fread("data/ndvi.csv")
  pmm=fread("data/pmm.csv")
  caid=fread("data/caid.csv")
  mild=fread("data/mild_export.csv")
  ili=fread("data/ili.csv")
  #ili[,deb_sem:=as.character((as.Date(deb_sem,origin="1970-01-01")))]
  pfa=fread("data/pfa.csv")
  palu_autoch=fread("data/palu_autoch.csv")
  tdr_eff=fread("data/tdr_eff.csv")
  hfi=fread("data/hfi.csv")
}
# Latitude/longitude of the sentinel sites; site codes are renamed to
# "sites" and lower-cased so they can be joined with the tables above.
#cat('query of lat/long of sites...')
sentinel_latlong=fread("data/sentinel.csv")
setnames(sentinel_latlong,"CODE","sites")
sentinel_latlong[,sites:=tolower(sites)]
} else {
  # Data objects already exist in this session; nothing to reload.
  #cat("No need to pull data anymore!\n")
}
|
\name{landscape.modify.epoch}
\alias{landscape.modify.epoch}
\title{Modifies one of the landscape's epochs}
\description{
This function updates the demographic parameters in a landscape for a
particular epoch
}
\usage{
rland <- landscape.modify.epoch(rland,epoch=1,S=NULL,R=NULL,M=NULL,epochprob=NULL,startgen=NULL,extinct=NULL,carry=NULL,localprob=NULL)
}
\arguments{
\item{rland}{landscape object, required}
\item{epoch}{the epoch to modify, default 1}
\item{S}{(default=NULL) Survivability matrix for epoch, NULL leaves unchanged}
\item{R}{(default=NULL) female Reproduction matrix for epoch, NULL
leaves unchanged}
\item{M}{(default=NULL) Male reproduction matrix for epoch, NULL
leaves unchanged}
\item{epochprob}{(default=NULL) probability of choosing this epoch, NULL
leaves unchanged}
\item{startgen}{(default=NULL) generation in which this epoch starts,
NULL leaves unchanged}
\item{extinct}{(default=NULL) vector of extinction probabilities per
generation for each subpopulation, NULL leaves unchanged}
\item{carry}{(default=NULL) vector of carrying capacities for each
subpopulation, must be rland$intparam$habitats in length, NULL
leaves unchanged}
\item{localprob}{(default=NULL) vector of probabilities for choosing
local demographies, must be length(rland$demography$localdem) in
length, NULL
leaves unchanged}
}
\keyword{misc}
| /man/landscape.modify.epoch.Rd | no_license | tfussell/rmetasim_gpu | R | false | false | 1,407 | rd | \name{landscape.modify.epoch}
\alias{landscape.modify.epoch}
\title{Modifies one of the landscape's epochs}
\description{
This function updates the demographic parameters in a landscape for a
particular epoch
}
\usage{
rland <- landscape.modify.epoch(rland,epoch=1,S=NULL,R=NULL,M=NULL,epochprob=NULL,startgen=NULL,extinct=NULL,carry=NULL,localprob=NULL)
}
\arguments{
\item{rland}{landscape object, required}
\item{epoch}{the epoch to modify, default 1}
\item{S}{(default=NULL) Survivability matrix for epoch, NULL leaves unchanged}
\item{R}{(default=NULL) female Reproduction matrix for epoch, NULL
leaves unchanged}
\item{M}{(default=NULL) Male reproduction matrix for epoch, NULL
leaves unchanged}
\item{epochprob}{(default=NULL) probability of choosing this epoch, NULL
leaves unchanged}
\item{startgen}{(default=NULL) generation in which this epoch starts,
NULL leaves unchanged}
\item{extinct}{(default=NULL) vector of extinction probabilities per
generation for each subpopulation, NULL leaves unchanged}
\item{carry}{(default=NULL) vector of carrying capacities for each
subpopulation, must be rland$intparam$habitats in length, NULL
leaves unchanged}
\item{localprob}{(default=NULL) vector of probabilities for choosing
local demographies, must be length(rland$demography$localdem) in
length, NULL
leaves unchanged}
}
\keyword{misc}
|
library(ggplot2)
# dplyr supplies the %>% pipe and the verbs used below (mutate, arrange,
# select, group_by, summarise); loading it explicitly makes this script
# runnable on its own instead of relying on the caller's search path.
library(dplyr)

# Flag each licence as "velké" (large) or "malé" (small), presumably by
# installed capacity (`vykon`) with a 0.010 threshold -- TODO confirm units.
licence_udelene_solarni <- licence_udelene_solarni %>%
  mutate(typ = ifelse(vykon > 0.010, "velké", "malé"))

# Total newly licensed capacity per date and size class.
grafdata <- licence_udelene_solarni %>%
  arrange(datum) %>%
  select(datum, vykon, typ) %>%
  group_by(datum, typ) %>%
  summarise(vykon_celkem = sum(vykon))

# Running (cumulative) capacity over time within each size class.
grafdata <- grafdata %>%
  group_by(typ) %>%
  mutate(cumsum = cumsum(vykon_celkem))

# Line chart of cumulative capacity, one line per size class.
g <- ggplot(grafdata, aes(x = datum, y = cumsum, colour = typ))
g <- g + geom_line()
g
| /chart.R | no_license | DataRozhlas/eru-licence-scraper | R | false | false | 451 | r | library(ggplot2)
# dplyr supplies the %>% pipe and the verbs used below (mutate, arrange,
# select, group_by, summarise); loading it explicitly makes this script
# runnable on its own instead of relying on the caller's search path.
library(dplyr)

# Flag each licence as "velké" (large) or "malé" (small), presumably by
# installed capacity (`vykon`) with a 0.010 threshold -- TODO confirm units.
licence_udelene_solarni <- licence_udelene_solarni %>%
  mutate(typ = ifelse(vykon > 0.010, "velké", "malé"))

# Total newly licensed capacity per date and size class.
grafdata <- licence_udelene_solarni %>%
  arrange(datum) %>%
  select(datum, vykon, typ) %>%
  group_by(datum, typ) %>%
  summarise(vykon_celkem = sum(vykon))

# Running (cumulative) capacity over time within each size class.
grafdata <- grafdata %>%
  group_by(typ) %>%
  mutate(cumsum = cumsum(vykon_celkem))

# Line chart of cumulative capacity, one line per size class.
g <- ggplot(grafdata, aes(x = datum, y = cumsum, colour = typ))
g <- g + geom_line()
g
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.train.R
\name{print.train}
\alias{print.train}
\title{Print Method for the train Class}
\usage{
\method{print}{train}(x, printCall = FALSE, details = FALSE,
selectCol = FALSE, showSD = FALSE, ...)
}
\arguments{
\item{x}{an object of class \code{\link{train}}.}
\item{printCall}{a logical to print the call at the top of the output}
\item{details}{a logical to show print or summary methods for the final
model. In some cases (such as \code{gbm}, \code{knn}, \code{lvq}, naive
Bayes and bagged tree models), no information will be printed even if
\code{details = TRUE}}
\item{selectCol}{a logical whether to add a column with a star next to the
selected parameters}
\item{showSD}{a logical whether to show the standard deviation of the
resampling results within parentheses (e.g. "4.24 (0.493)")}
\item{\dots}{options passed to \code{\link[base]{format}}}
}
\value{
A matrix with the complexity parameters and performance (invisibly).
}
\description{
Print the results of a \code{\link{train}} object.
}
\details{
The table of complexity parameters used, their resampled performance and a
flag for which rows are optimal.
}
\examples{
\dontrun{
data(iris)
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
options(digits = 3)
library(klaR)
rdaFit <- train(TrainData, TrainClasses, method = "rda",
control = trainControl(method = "cv"))
rdaFit
print(rdaFit, showSD = TRUE)
}
}
\author{
Max Kuhn
}
\seealso{
\code{\link{train}}
}
\keyword{print}
| /pkg/caret/man/print.train.Rd | no_license | JackStat/caret | R | false | true | 1,554 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.train.R
\name{print.train}
\alias{print.train}
\title{Print Method for the train Class}
\usage{
\method{print}{train}(x, printCall = FALSE, details = FALSE,
selectCol = FALSE, showSD = FALSE, ...)
}
\arguments{
\item{x}{an object of class \code{\link{train}}.}
\item{printCall}{a logical to print the call at the top of the output}
\item{details}{a logical to show print or summary methods for the final
model. In some cases (such as \code{gbm}, \code{knn}, \code{lvq}, naive
Bayes and bagged tree models), no information will be printed even if
\code{details = TRUE}}
\item{selectCol}{a logical whether to add a column with a star next to the
selected parameters}
\item{showSD}{a logical whether to show the standard deviation of the
resampling results within parentheses (e.g. "4.24 (0.493)")}
\item{\dots}{options passed to \code{\link[base]{format}}}
}
\value{
A matrix with the complexity parameters and performance (invisibly).
}
\description{
Print the results of a \code{\link{train}} object.
}
\details{
The table of complexity parameters used, their resampled performance and a
flag for which rows are optimal.
}
\examples{
\dontrun{
data(iris)
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
options(digits = 3)
library(klaR)
rdaFit <- train(TrainData, TrainClasses, method = "rda",
control = trainControl(method = "cv"))
rdaFit
print(rdaFit, showSD = TRUE)
}
}
\author{
Max Kuhn
}
\seealso{
\code{\link{train}}
}
\keyword{print}
|
# Minimal Shiny app: displays the cervical-cancer risk-factor dataset as
# a table. UI and table helpers come from the two sourced files
# (presumably function.UI in testUI.R and function.table in plotTest.R).
library(shiny)
source("plotTest.R")
source("testUI.R")
# Semicolon-separated data file with a header row.
df <- read.csv("risk_factors_cervical_cancer_Copie.csv", header = TRUE, sep = ";")
ui <- function.UI()
server <- function(input, output) {
    # Render the dataset into the output slot named "df".
    output$df <- function.table(df)
}
shinyApp(ui = ui, server = server)
| /app.R | no_license | Vincent-Letourmy/Sem4 | R | false | false | 277 | r |
# Minimal Shiny app: displays the cervical-cancer risk-factor dataset as
# a table. UI and table helpers come from the two sourced files
# (presumably function.UI in testUI.R and function.table in plotTest.R).
library(shiny)
source("plotTest.R")
source("testUI.R")
# Semicolon-separated data file with a header row.
df <- read.csv("risk_factors_cervical_cancer_Copie.csv", header = TRUE, sep = ";")
ui <- function.UI()
server <- function(input, output) {
    # Render the dataset into the output slot named "df".
    output$df <- function.table(df)
}
shinyApp(ui = ui, server = server)
|
# Section 4.4 example: Bayesian analysis of a bioassay experiment.
# All figures are written into the directory given as the first trailing
# command-line argument.
command.arguments <- commandArgs(trailingOnly = TRUE);
output.directory <- command.arguments[1];
####################################################################################################
setwd(output.directory);
library(LearnBayes);
library(ggplot2);
library(scales);
### SECTION 4.4 ####################################################################################
# Observed data: deaths out of sample.size animals at four log-dose levels.
bioassay <- data.frame(
	log.dose = c(-0.86, -0.30, -0.05, 0.73),
	sample.size = c(5,5,5,5),
	deaths = c(0,1,3,5)
	);
bioassay;
# Classical logistic-regression fit, for comparison with the Bayesian fit.
glm.results <- glm(
	cbind(deaths,I(sample.size-deaths)) ~ log.dose,
	data = bioassay,
	family = "binomial"
	);
summary(glm.results);
# Prior information expressed as pseudo-observations that are simply
# appended to the observed data before evaluating the posterior.
prior.pseudodata <- data.frame(
	log.dose = c(-0.70, 0.60),
	sample.size = c( 4.68, 2.84),
	deaths = c( 1.12, 2.10)
	);
prior.pseudodata;
posterior.pseudodata <- rbind(bioassay,prior.pseudodata);
# logisticpost() expects columns named x (log dose), n (size), y (deaths).
colnames(posterior.pseudodata) <- c('x','n','y');
posterior.pseudodata;
# Evaluation grid: intercept beta0 in [-3,3], slope beta1 in [-1,9].
grid.size <- 1e-2;
grid.beta0 <- -3 + (3-(-3)) * seq(0,1,grid.size);
grid.beta1 <- -1 + (9-(-1)) * seq(0,1,grid.size);
# Figure 4-5: contour plot of the log posterior density over the grid.
png("Fig4-5_contour-bioassay.png");
DF.temp <- expand.grid(x = grid.beta0, y = grid.beta1);
DF.temp <- cbind(
	DF.temp,
	log.density = apply(
		X = DF.temp,
		MARGIN = 1,
		FUN =function(b) {return(logisticpost(beta=b,data=posterior.pseudodata));}
		)
	);
my.ggplot <- ggplot(data = NULL);
my.ggplot <- my.ggplot + stat_contour(data = DF.temp, aes(x,y,z=log.density));
my.ggplot;
dev.off();
# Figure 4-6: same contours overlaid with 5000 posterior draws obtained
# via LearnBayes::simcontour().
png("Fig4-6_contour-points-bioassay.png");
DF.points <- as.data.frame(simcontour(
	logf = logisticpost,
	limits = c(-3,3,-1,9),
	data = posterior.pseudodata,
	m = 5000
	));
my.ggplot <- ggplot(data = NULL);
my.ggplot <- my.ggplot + stat_contour(data = DF.temp, aes(x,y,z=log.density));
my.ggplot <- my.ggplot + geom_point(data = DF.points, aes(x,y), colour = alpha("darkolivegreen", 0.25));
my.ggplot;
dev.off();
# Figure 4-7: marginal posterior density of the slope beta1 (column y).
png("Fig4-7_histogram-beta1.png");
qplot(x = DF.points[,'y'], geom = "density");
dev.off();
# Figure 4-8: posterior of the LD50, theta = -beta0/beta1.
png("Fig4-8_histogram-LD50.png");
theta <- - DF.points[,'x'] / DF.points[,'y']
qplot(x = theta, geom = "histogram", binwidth = 0.05);
dev.off();
# 95% credible interval for the LD50.
quantile(theta,c(0.025,0.975));
| /exercises/statistics/bayesian/albert/chap04/examples/section-04-04/code/albert-section-04-04.R | no_license | paradisepilot/statistics | R | false | false | 2,134 | r |
# Section 4.4 example: Bayesian analysis of a bioassay experiment.
# All figures are written into the directory given as the first trailing
# command-line argument.
command.arguments <- commandArgs(trailingOnly = TRUE);
output.directory <- command.arguments[1];
####################################################################################################
setwd(output.directory);
library(LearnBayes);
library(ggplot2);
library(scales);
### SECTION 4.4 ####################################################################################
# Observed data: deaths out of sample.size animals at four log-dose levels.
bioassay <- data.frame(
	log.dose = c(-0.86, -0.30, -0.05, 0.73),
	sample.size = c(5,5,5,5),
	deaths = c(0,1,3,5)
	);
bioassay;
# Classical logistic-regression fit, for comparison with the Bayesian fit.
glm.results <- glm(
	cbind(deaths,I(sample.size-deaths)) ~ log.dose,
	data = bioassay,
	family = "binomial"
	);
summary(glm.results);
# Prior information expressed as pseudo-observations that are simply
# appended to the observed data before evaluating the posterior.
prior.pseudodata <- data.frame(
	log.dose = c(-0.70, 0.60),
	sample.size = c( 4.68, 2.84),
	deaths = c( 1.12, 2.10)
	);
prior.pseudodata;
posterior.pseudodata <- rbind(bioassay,prior.pseudodata);
# logisticpost() expects columns named x (log dose), n (size), y (deaths).
colnames(posterior.pseudodata) <- c('x','n','y');
posterior.pseudodata;
# Evaluation grid: intercept beta0 in [-3,3], slope beta1 in [-1,9].
grid.size <- 1e-2;
grid.beta0 <- -3 + (3-(-3)) * seq(0,1,grid.size);
grid.beta1 <- -1 + (9-(-1)) * seq(0,1,grid.size);
# Figure 4-5: contour plot of the log posterior density over the grid.
png("Fig4-5_contour-bioassay.png");
DF.temp <- expand.grid(x = grid.beta0, y = grid.beta1);
DF.temp <- cbind(
	DF.temp,
	log.density = apply(
		X = DF.temp,
		MARGIN = 1,
		FUN =function(b) {return(logisticpost(beta=b,data=posterior.pseudodata));}
		)
	);
my.ggplot <- ggplot(data = NULL);
my.ggplot <- my.ggplot + stat_contour(data = DF.temp, aes(x,y,z=log.density));
my.ggplot;
dev.off();
# Figure 4-6: same contours overlaid with 5000 posterior draws obtained
# via LearnBayes::simcontour().
png("Fig4-6_contour-points-bioassay.png");
DF.points <- as.data.frame(simcontour(
	logf = logisticpost,
	limits = c(-3,3,-1,9),
	data = posterior.pseudodata,
	m = 5000
	));
my.ggplot <- ggplot(data = NULL);
my.ggplot <- my.ggplot + stat_contour(data = DF.temp, aes(x,y,z=log.density));
my.ggplot <- my.ggplot + geom_point(data = DF.points, aes(x,y), colour = alpha("darkolivegreen", 0.25));
my.ggplot;
dev.off();
# Figure 4-7: marginal posterior density of the slope beta1 (column y).
png("Fig4-7_histogram-beta1.png");
qplot(x = DF.points[,'y'], geom = "density");
dev.off();
# Figure 4-8: posterior of the LD50, theta = -beta0/beta1.
png("Fig4-8_histogram-LD50.png");
theta <- - DF.points[,'x'] / DF.points[,'y']
qplot(x = theta, geom = "histogram", binwidth = 0.05);
dev.off();
# 95% credible interval for the LD50.
quantile(theta,c(0.025,0.975));
|
#' Factorial Approach for Sorting Napping Task data
#'
#' Perform Factorial Approach for Sorting Napping Task data (FASNT) on a table
#' where the rows (i) are products and the columns (j) are for each consumer
#' the coordinates of the products on the tablecloth associated with napping on
#' the one hand and the partitioning variable associated with categorization
#' on the other hand. The columns are grouped by consumer. For the
#' partitioning variable, the label associated with a group can be an arbitrary
#' label (for example G1 for group 1, \emph{etc.}) or the words associated with
#' the group in the case of qualified sorted napping.
#'
#'
#' @param don a data frame with n rows (products) and p columns (assessor :
#' categorical variables)
#' @param first 2 possibilities: "nappe" if the napping variables first appear
#' for each consumer or "catego" if it is the categorization variable
#' @param B the number of simulations (corresponding to the number of virtual
#' panels) used to compute the ellipses
#' @param axes a length 2 vector specifying the components to plot
#' @param alpha the confidence level of the ellipses
#' @param ncp number of dimensions kept in the results (by default 5)
#' @param graph boolean, if TRUE a graph is displayed
#' @param name.group a vector containing the name of the consumers (by default,
#' NULL and the group are named J1, J2 and so on)
#' @param sep.word the word separator character in the case of qualified sorted
#' napping
#' @param word.min minimum sample size for the word selection in textual
#' analysis
#' @param ncp.boot number of dimensions used for the Procrustean rotations to
#' build confidence ellipses (by default 2)
#' @return A list containing the following elements: \item{eig}{a matrix
#' containing all the eigenvalues, the percentage of variance and the
#' cumulative percentage of variance} \item{ind}{a list of matrices containing
#' all the results for the products (coordinates, square cosine,
#' contributions)} \item{quali.var}{a list of matrices containing all the
#' results for the categories of categorization (coordinates, square cosine,
#' contributions, v.test)} \item{quanti.var}{a list of matrices containing all
#' the results for the napping (coordinates, square cosine, contributions,
#' v.test)} \item{group}{a list of matrices containing all the results for
#' consumers (coordinates, square cosine, contributions)} \item{indicator}{a
#' list of matrices containing different indicators for napping and
#' categorization} \item{textual}{the results of the textual analysis for the
#' products} \item{call}{a list with some statistics}
#' @author Marine Cadoret, S\'ebastien L\^e
#' \email{sebastien.le@@agrocampus-ouest.fr}
#' @references Pag\`es, J., L\^e, S., Cadoret, M. (2010) \emph{The Sorted
#' Napping: a new holistic approach in sensory evaluation}. Journal of Sensory
#' Studies\cr Cadoret, M., L\^e, S., Pag\`es, J. (2009) \emph{Combining the
#' best of two worlds, the "sorted napping"}. SPISE. Ho Chi Minh City,
#' Vietnam\cr
#' @keywords multivariate
#' @examples
#'
#' \dontrun{
#' data(smoothies)
#' ## Example of FASNT results
#' res.fasnt<-fasnt(smoothies,first="nappe",sep.word=";")
#' }
#'
#' @export fasnt
fasnt=function(don,first="nappe",B=100,axes=c(1,2),alpha=0.05,ncp=5,graph=TRUE,name.group=NULL,sep.word=" ",word.min=5,ncp.boot=2){
# Factorial Approach for Sorting Napping Task (FASNT).
# Rows of 'don' are products; each consumer contributes 3 columns:
# 2 napping (tablecloth) coordinates + 1 categorization variable,
# in the order given by 'first' ("nappe" or "catego").
don=data.frame(don)
I=nrow(don)   # number of products
J=ncol(don)/3 # number of consumers (3 columns per consumer)
# If the categorization column comes first for each subject, reorder the
# columns so the two napping coordinates precede the categorization column.
don_input=don
if (first=="catego"){
don2=don
for (i in 1:J){
don2[,(3*(i-1)+1):(3*(i-1)+2)]=don[,(3*(i-1)+2):(3*i)]
don2[,(3*i)]=don[,(3*(i-1)+1)]
colnames(don2)[(3*(i-1)+1):(3*(i-1)+2)]=colnames(don)[(3*(i-1)+2):(3*i)]
colnames(don2)[(3*i)]=colnames(don)[(3*(i-1)+1)]
}
don=don2
}
# Default group names: "J<i>n" for each napping group, "J<i>c" for each
# categorization group, and "J<i>" for the consumer-level nodes.
if(is.null(name.group)){
name.group=list(rep("aa",2*J),c(paste("J",1:J,sep="")))
name.group[[1]][seq(from=1,to=2*J,by=2)]=paste("J",1:J,"n",sep="")
name.group[[1]][seq(from=2,to=2*J,by=2)]=paste("J",1:J,"c",sep="") }
############################## HELPER FUNCTIONS ################################
# Plot the first-level groups of a HMFA result (one triangle per group),
# colouring napping vs categorization groups alternately.
plot.HMFA.group.niv1 <- function(res.hmfa, coord = c(1, 2),title = NULL, cex = 1, sub.title = NULL) {
    lab.x <- paste("Dim ", coord[1], " (", signif(res.hmfa$eig[coord[1],
        2], 4), " %)", sep = "")
    lab.y <- paste("Dim ", coord[2], " (", signif(res.hmfa$eig[coord[2],
        2], 4), " %)", sep = "")
    if (is.null(title))
        title <- "Groups representation"
    else sub.title <- "Groups representation"
    coord.actif <- res.hmfa$group$coord[[1]][, coord]
    gp=rep(c("1","2"),(length(coord.actif[,1])/2))
    plot(coord.actif, xlab = lab.x, ylab = lab.y,
        xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2],
        cex = cex, main = title, cex.main = cex, asp = 1)
    points(coord.actif[, 1], coord.actif[, 2], col = gp,
        pch = 17, cex = cex)
    text(coord.actif[, 1], y = coord.actif[, 2],
        labels = rownames(coord.actif), pos = 3, col = gp)
    title(sub = sub.title, cex.sub = cex, font.sub = 2, col.sub = "steelblue4",
        adj = 0, line = 3.8)
}
# Palette used by the plotting helpers above.
color = c("black", "red", "green3", "blue", "cyan", "magenta",
    "darkgray", "darkgoldenrod", "darkgreen", "violet",
    "turquoise", "orange", "lightpink", "lavender", "yellow",
    "lightgreen", "lightgrey", "lightblue", "darkkhaki",
    "darkmagenta", "darkolivegreen", "lightcyan", "darkorange",
    "darkorchid", "darkred", "darksalmon", "darkseagreen",
    "darkslateblue", "darkslategray", "darkslategrey",
    "darkturquoise", "darkviolet", "lightgray", "lightsalmon",
    "lightyellow", "maroon")
################################ START OF ANALYSIS #############################
# Hierarchical Multiple Factor Analysis: each consumer is a level-2 node
# grouping a continuous napping group (2 coords, type "c") and a nominal
# categorization group (type "n").
hierar=list(rep(c(2,1),J),rep(2,J))
afmh=HMFA(don,H=hierar,type=rep(c("c","n"),J),name.group=name.group,graph=F,ncp=ncp)
if (graph){
lab.x <- paste("Dim ", axes[1], " (", signif(afmh$eig[axes[1],2], 4), " %)", sep = "")
lab.y <- paste("Dim ", axes[2], " (", signif(afmh$eig[axes[2],2], 4), " %)", sep = "")
# Product (individual) factor map
xmin <- min(afmh$ind$coord[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]], rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
# Products together with the categorization levels (qualitative categories)
xmin <- min(afmh$ind$coord[, axes[1]],afmh$quali.var$coord[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]],afmh$quali.var$coord[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]],afmh$quali.var$coord[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]],afmh$quali.var$coord[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15,col=2)
points(afmh$quali.var$coord[, axes], pch = 15)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]],col=2, rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
text(afmh$quali.var$coord[, axes[1]],afmh$quali.var$coord[, axes[2]], rownames(afmh$quali.var$coord),pos = 3, offset = 0.2, cex = 0.8)
# Partial points: each product as seen by each consumer, linked by a line to
# its mean (consensus) point.
inter <- afmh$partial[[2]][, axes, 1]
for (i in 2:J) inter <- rbind(inter, afmh$partial[[2]][,axes, i])
xmin <- min(afmh$ind$coord[, axes[1]],inter[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]],inter[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]],inter[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]],inter[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15,col=1:I)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]],col=1:I, rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
for (j in 1:J) {
points(afmh$partial[[2]][, axes, j], col=rep(1:I,times=J),pch = 20, cex = 0.8)
text(afmh$partial[[2]][, axes, j], col=rep(1:I,times=J),labels=rownames(afmh$group$coord[[2]])[j],pos=3,cex=0.5)
for (i in 1:nrow(afmh$partial[[2]]))
lines(c(afmh$ind$coord[i,axes[1]], afmh$partial[[2]][i, axes[1],j]), c(afmh$ind$coord[i, axes[2]], afmh$partial[[2]][i,axes[2], j]),col=i)
}
# Correlation circle for the napping (quantitative) variables
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y,xlim = c(-1.1, 1.1), ylim = c(-1.1, 1.1),main="Correlation circle", col = "white",asp = 1)
x.cercle <- seq(-1, 1, by = 0.01)
y.cercle <- sqrt(1 - x.cercle^2)
lines(x.cercle, y = y.cercle)
lines(x.cercle, y = -y.cercle)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
coord.var <- afmh$quanti.var$cor[, axes]
for (v in 1:nrow(coord.var)) {
arrows(0, 0, coord.var[v, 1], coord.var[v,2], length = 0.1, angle = 15, code = 2)
# Place each label on the side the arrow points to (pos: 1=below, 2=left,
# 3=above, 4=right).
if (abs(coord.var[v, 1]) > abs(coord.var[v,2])) {
if (coord.var[v, 1] >= 0)
pos <- 4
else pos <- 2
}
else {
if (coord.var[v, 2] >= 0)
pos <- 3
else pos <- 1
}
text(coord.var[v, 1], y = coord.var[v, 2],labels = rownames(coord.var)[v], pos = pos)
}
# Subjects (consumers) representation
coord.actif <- afmh$group$coord[[2]][, axes]
dev.new()
plot(coord.actif, xlab = lab.x, ylab = lab.y,xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2], main = "Subjects representation", asp = 1)
points(coord.actif[, 1], coord.actif[, 2], col = color[2],pch = 17)
text(coord.actif[, 1], y = coord.actif[, 2],labels = rownames(coord.actif), pos = 3, col = color[2])
# Methods representation (napping groups vs categorization groups)
coord.actif <- afmh$group$coord[[1]][, axes]
gp=rep(c("1","2"),(length(coord.actif[,1])/2))
dev.new()
plot(coord.actif, xlab = lab.x, ylab = lab.y,xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2],main = "Method's representation", asp = 1)
points(coord.actif[, 1], coord.actif[, 2], col = gp,pch = 17)
text(coord.actif[, 1], y = coord.actif[, 2],labels = rownames(coord.actif), pos = 3, col = gp)
}
####################### Confidence ellipses (bootstrap) ########################
if (graph){
boot (don,method="sortnapping", level.conf = 1-alpha,nbsim=B,ncp=ncp.boot)}
# Napping indicators
# Extract only the tablecloth (napping) coordinate columns
nappe_c=data.frame(matrix(NA,I,2*J))
for (i in 1:J){
nappe_c[,(2*(i-1)+1)]=don[,(3*(i-1)+1)]
colnames(nappe_c)[(2*(i-1)+1)]=colnames(don)[(3*(i-1)+1)]
nappe_c[,(2*i)]=don[,(3*(i-1)+2)]
colnames(nappe_c)[(2*i)]=colnames(don)[(3*(i-1)+2)]}
# Per-tablecloth summary: spread (sd, range) of the X/Y coordinates and a
# dimensionality index; returns min/median/max over consumers.
res.nappe=function(don){
I=nrow(don)
J=ncol(don)/2
X=Y=matrix(NA,I,J)
for (i in 1:J){
X[,i]=don[,(2*i-1)]
Y[,i]=don[,(2*i)]}
resultat=matrix(NA,5,3)
rownames(resultat)=c("St. dev. X","St. dev. Y","Range X","Range Y","Dimensionnality")
colnames(resultat)=c("min","median","max")
ecart.type.X=apply(X,2,sd)
ecart.type.Y=apply(Y,2,sd)
min.X=apply(X,2,min)
max.X=apply(X,2,max)
etendue.X=max.X-min.X
min.Y=apply(Y,2,min)
max.Y=apply(Y,2,max)
etendue.Y=max.Y-min.Y
res.pca=matrix(NA,2,J)
dimen=rep(NA,J)
# Dimensionality of each tablecloth derived from the two eigenvalues of
# the (uncentred-scale) PCA of its X/Y coordinates.
for (i in 1:J){
acp=PCA(don[,(2*(i-1)+1):(2*i)],graph=F,scale.unit=F)
res.pca[,i]=acp$eig[,1]
dimen[i]=1+(res.pca[2,i]^2/res.pca[1,i]^2)}
resultat[1,1]=min(ecart.type.X)
resultat[1,2]=median(ecart.type.X)
resultat[1,3]=max(ecart.type.X)
resultat[2,1]=min(ecart.type.Y)
resultat[2,2]=median(ecart.type.Y)
resultat[2,3]=max(ecart.type.Y)
resultat[3,1]=min(etendue.X)
resultat[3,2]=median(etendue.X)
resultat[3,3]=max(etendue.X)
resultat[4,1]=min(etendue.Y)
resultat[4,2]=median(etendue.Y)
resultat[4,3]=max(etendue.Y)
resultat[5,1]=min(dimen)
resultat[5,2]=median(dimen)
resultat[5,3]=max(dimen)
return(resultat)}
# Agreement indicators: inertia explained by the PCA of all X coordinates,
# of all Y coordinates, and of the first principal component of each
# individual tablecloth.
res.nappe2=function(don){
I=nrow(don)
J=ncol(don)/2
X=Y=matrix(NA,I,J)
for (i in 1:J){
X[,i]=don[,(2*i-1)]
Y[,i]=don[,(2*i)]}
resultat=matrix(NA,3,2)
rownames(resultat)=c("PCA X","PCA Y","PCA F1")
colnames(resultat)=c("% inertia dim 1","% inertia plane 1-2")
acp.X=PCA(X,scale.unit=T,graph=F)
acp.Y=PCA(Y,scale.unit=T,graph=F)
res.pca=matrix(NA,I,J)
for (i in 1:J){
acp=PCA(don[,(2*(i-1)+1):(2*i)],graph=F,scale.unit=F)
res.pca[,i]=acp$ind$coord[,1]}
acp.F1=PCA(res.pca,scale.unit=T,graph=F)
resultat[1,1]=acp.X$eig[1,2]
resultat[1,2]=acp.X$eig[2,3]
resultat[2,1]=acp.Y$eig[1,2]
resultat[2,2]=acp.Y$eig[2,3]
resultat[3,1]=acp.F1$eig[1,2]
resultat[3,2]=acp.F1$eig[2,3]
return(list(resultat,res.pca))}
res1_nappe=res.nappe(nappe_c)
res2_nappe=res.nappe2(nappe_c)
# Categorization indicators
catego_c=data.frame(matrix(NA,I,J))
for (i in 1:J){
catego_c[,i]=don[,(3*i)]
colnames(catego_c)[i]=colnames(don)[3*i]}
# Co-occurrence matrix of products (how often two products share a group),
# reordered along the first HMFA dimension.
ordre_prod=order(afmh$ind$coord[,1])
coocc=tab.disjonctif(catego_c)%*%t(tab.disjonctif(catego_c))
coocc_reord=coocc[ordre_prod,ordre_prod]
colnames(coocc_reord)=rownames(coocc_reord)=rownames(afmh$ind$coord)[ordre_prod]
# Number of products per group
lev=rep(NA,J)
for (i in 1:J){
lev[i]=length(levels(catego_c[,i]))}
nbp=strsplit(summary(catego_c,maxsum=max(lev)),":")
agg=rep(0,J*max(lev))
for (i in 1:(J*max(lev))){
agg[i]=nbp[[i]][2]}
agg2=na.omit(agg)
agg2=as.factor(agg2)
if (graph){
dev.new()
plot(agg2,main="Number of products per group",xlab=c("Number of products"),ylab=c("Frequency"))}
# Number of groups formed by each consumer
lev2=as.factor(lev)
if (graph){
dev.new()
plot(lev2,main="Number of groups provided during sorting task",xlab=c("Number of groups"),ylab=c("Frequency"))}
## Textual analysis: one row per (product, consumer) pair holding the group
## label the consumer used for that product.
texte=matrix(NA,(I*J),3)
texte=data.frame(texte)
texte[,1]=rep(rownames(don),J)
texte[,2]=rep(colnames(catego_c),each=I)
for (i in 1:J){
texte[((I*(i-1))+1):(I*i),3]=paste(catego_c[,i])}
restext=textual(texte,3,1,sep.word=sep.word)
# Remove the arbitrary labels g1, ..., g99 (note: textual() lower-cases
# every word).
mod.suppr=paste("g",1:99,sep="")
mod.suppr=intersect(colnames(restext$cont.table),mod.suppr)
if (length(mod.suppr)!=0){
num.mod.suppr=which(colnames(restext$cont.table)%in%mod.suppr)
restext$cont.table=restext$cont.table[,-num.mod.suppr]
num.mod.suppr2=which(rownames(restext$nb.words)%in%mod.suppr)
restext$nb.words=restext$nb.words[-num.mod.suppr2,] }
# Number of distinct words
nb_mot_diff=nrow(restext$nb.words)
cat("Number of different words : ",nb_mot_diff,"\n")
# Number of words per group
mots=rep(NA,sum(lev))
grp=0
for (i in 1:J){
mots[(grp+1):(grp+lev[i])]=levels(catego_c[,i])
grp=grp+lev[i]}
mots_split=strsplit(mots,split=sep.word)
nb_mots=rep(NA,length(mots_split))
for (i in 1:length(mots_split)){
if (mots_split[[i]][1] %in% paste("G",1:99,sep="")){
nb_mots[i]=0}
else {
nb_mots[i]=length(mots_split[[i]])}}
nb_mots2=as.factor(nb_mots)
if (graph){
dev.new()
plot(nb_mots2,main="Number of words per group")}
# Drop words used at most 'word.min' times, then characterize the products
# by their over/under-represented words.
freq_min=which(apply(restext$cont.table,2,sum)<=word.min)
if (length(freq_min)!=0){
restext$cont.table=restext$cont.table[,-freq_min]}
caract_prod=descfreq(restext$cont.table)
########################### Assemble the returned object ######################
indicator=list(catego=coocc_reord,napping=list(res1_nappe,res2_nappe[[1]]))
call=list(hmfa=afmh,X=don_input)
ind=list(coord=afmh$ind$coord,cos2=afmh$ind$cos2,contrib=afmh$ind$contrib,partial=afmh$partial)
res = list(eig=afmh$eig,ind=ind,quali.var=afmh$quali.var,quanti.var=afmh$quanti.var,group=afmh$group,indicator=indicator,textual=caract_prod,call=call)
# NOTE(review): "list " has a trailing space — likely a typo for "list";
# kept as-is to preserve behavior.
class(res) <- c("fasnt", "list ")
return(res)
}
| /R/fasnt.r | no_license | aswansyahputra/SensoMineR | R | false | false | 15,558 | r | #' Factorial Approach for Sorting Napping Task data
#'
#' Perform Factorial Approach for Sorting Napping Task data (FASNT) on a table
#' where the rows (i) are products and the columns (j) are for each consumer
#' the coordinates of the products on the tablecloth associated with napping on
#' the one hand and the partitioning variable associated with categorization
#' on the other hand. The columns are grouped by consumer. For the
#' partitioning variable, the label associated with a group can be an arbitrary
#' label (for example G1 for group 1, \emph{etc.}) or the words associated with
#' the group in the case of qualified sorted napping.
#'
#'
#' @param don a data frame with n rows (products) and p columns (assesor :
#' categorical variables)
#' @param first 2 possibilities: "nappe" if the napping variables first appear
#' for each consumer or "catego" if it is the categorization variable
#' @param B the number of simulations (corresponding to the number of virtual
#' panels) used to compute the ellipses
#' @param axes a length 2 vector specifying the components to plot
#' @param alpha the confidence level of the ellipses
#' @param ncp number of dimensions kept in the results (by default 5)
#' @param graph boolean, if TRUE a graph is displayed
#' @param name.group a vector containing the name of the consumers (by default,
#' NULL and the group are named J1, J2 and so on)
#' @param sep.word the word separator character in the case of qualified sorted
#' napping
#' @param word.min minimum sample size for the word selection in textual
#' analysis
#' @param ncp.boot number of dimensions used for the Procrustean rotations to
#' build confidence ellipses (by default 2)
#' @return A list containing the following elements: \item{eig}{a matrix
#' containing all the eigenvalues, the percentage of variance and the
#' cumulative percentage of variance} \item{ind}{a list of matrices containing
#' all the results for the products (coordinates, square cosine,
#' contributions)} \item{quali.var}{a list of matrices containing all the
#' results for the categories of categorization (coordinates, square cosine,
#' contributions, v.test)} \item{quanti.var}{a list of matrices containing all
#' the results for the napping (coordinates, square cosine, contributions,
#' v.test)} \item{group}{a list of matrices containing all the results for
#' consumers (coordinates, square cosine, contributions)} \item{indicator}{a
#' list of matrices containing different indicators for napping and
#' categorization} \item{textual}{the results of the textual analysis for the
#' products} \item{call}{a list with some statistics}
#' @author Marine Cadoret, S\'ebastien L\^e
#' \email{sebastien.le@@agrocampus-ouest.fr}
#' @references Pag\`es, J., L\^e, S., Cadoret, M. (2010) \emph{The Sorted
#' Napping: a new holistic approach in sensory evaluation}. Journal of Sensory
#' Studies\cr Cadoret, M., L\^e, S., Pag\`es, J. (2009) \emph{Combining the
#' best of two worlds, the "sorted napping"}. SPISE. Ho Chi Minh City,
#' Vietnam\cr
#' @keywords multivariate
#' @examples
#'
#' \dontrun{
#' data(smoothies)
#' ## Example of FASNT results
#' res.fasnt<-fasnt(smoothies,first="nappe",sep.word=";")
#' }
#'
#' @export fasnt
fasnt=function(don,first="nappe",B=100,axes=c(1,2),alpha=0.05,ncp=5,graph=TRUE,name.group=NULL,sep.word=" ",word.min=5,ncp.boot=2){
# Factorial Approach for Sorting Napping Task (FASNT).
# Rows of 'don' are products; each consumer contributes 3 columns:
# 2 napping (tablecloth) coordinates + 1 categorization variable,
# in the order given by 'first' ("nappe" or "catego").
don=data.frame(don)
I=nrow(don)   # number of products
J=ncol(don)/3 # number of consumers (3 columns per consumer)
# If the categorization column comes first for each subject, reorder the
# columns so the two napping coordinates precede the categorization column.
don_input=don
if (first=="catego"){
don2=don
for (i in 1:J){
don2[,(3*(i-1)+1):(3*(i-1)+2)]=don[,(3*(i-1)+2):(3*i)]
don2[,(3*i)]=don[,(3*(i-1)+1)]
colnames(don2)[(3*(i-1)+1):(3*(i-1)+2)]=colnames(don)[(3*(i-1)+2):(3*i)]
colnames(don2)[(3*i)]=colnames(don)[(3*(i-1)+1)]
}
don=don2
}
# Default group names: "J<i>n" for each napping group, "J<i>c" for each
# categorization group, and "J<i>" for the consumer-level nodes.
if(is.null(name.group)){
name.group=list(rep("aa",2*J),c(paste("J",1:J,sep="")))
name.group[[1]][seq(from=1,to=2*J,by=2)]=paste("J",1:J,"n",sep="")
name.group[[1]][seq(from=2,to=2*J,by=2)]=paste("J",1:J,"c",sep="") }
############################## HELPER FUNCTIONS ################################
# Plot the first-level groups of a HMFA result (one triangle per group),
# colouring napping vs categorization groups alternately.
plot.HMFA.group.niv1 <- function(res.hmfa, coord = c(1, 2),title = NULL, cex = 1, sub.title = NULL) {
    lab.x <- paste("Dim ", coord[1], " (", signif(res.hmfa$eig[coord[1],
        2], 4), " %)", sep = "")
    lab.y <- paste("Dim ", coord[2], " (", signif(res.hmfa$eig[coord[2],
        2], 4), " %)", sep = "")
    if (is.null(title))
        title <- "Groups representation"
    else sub.title <- "Groups representation"
    coord.actif <- res.hmfa$group$coord[[1]][, coord]
    gp=rep(c("1","2"),(length(coord.actif[,1])/2))
    plot(coord.actif, xlab = lab.x, ylab = lab.y,
        xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2],
        cex = cex, main = title, cex.main = cex, asp = 1)
    points(coord.actif[, 1], coord.actif[, 2], col = gp,
        pch = 17, cex = cex)
    text(coord.actif[, 1], y = coord.actif[, 2],
        labels = rownames(coord.actif), pos = 3, col = gp)
    title(sub = sub.title, cex.sub = cex, font.sub = 2, col.sub = "steelblue4",
        adj = 0, line = 3.8)
}
# Palette used by the plotting helpers above.
color = c("black", "red", "green3", "blue", "cyan", "magenta",
    "darkgray", "darkgoldenrod", "darkgreen", "violet",
    "turquoise", "orange", "lightpink", "lavender", "yellow",
    "lightgreen", "lightgrey", "lightblue", "darkkhaki",
    "darkmagenta", "darkolivegreen", "lightcyan", "darkorange",
    "darkorchid", "darkred", "darksalmon", "darkseagreen",
    "darkslateblue", "darkslategray", "darkslategrey",
    "darkturquoise", "darkviolet", "lightgray", "lightsalmon",
    "lightyellow", "maroon")
################################ START OF ANALYSIS #############################
# Hierarchical Multiple Factor Analysis: each consumer is a level-2 node
# grouping a continuous napping group (2 coords, type "c") and a nominal
# categorization group (type "n").
hierar=list(rep(c(2,1),J),rep(2,J))
afmh=HMFA(don,H=hierar,type=rep(c("c","n"),J),name.group=name.group,graph=F,ncp=ncp)
if (graph){
lab.x <- paste("Dim ", axes[1], " (", signif(afmh$eig[axes[1],2], 4), " %)", sep = "")
lab.y <- paste("Dim ", axes[2], " (", signif(afmh$eig[axes[2],2], 4), " %)", sep = "")
# Product (individual) factor map
xmin <- min(afmh$ind$coord[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]], rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
# Products together with the categorization levels (qualitative categories)
xmin <- min(afmh$ind$coord[, axes[1]],afmh$quali.var$coord[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]],afmh$quali.var$coord[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]],afmh$quali.var$coord[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]],afmh$quali.var$coord[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15,col=2)
points(afmh$quali.var$coord[, axes], pch = 15)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]],col=2, rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
text(afmh$quali.var$coord[, axes[1]],afmh$quali.var$coord[, axes[2]], rownames(afmh$quali.var$coord),pos = 3, offset = 0.2, cex = 0.8)
# Partial points: each product as seen by each consumer, linked by a line to
# its mean (consensus) point.
inter <- afmh$partial[[2]][, axes, 1]
for (i in 2:J) inter <- rbind(inter, afmh$partial[[2]][,axes, i])
xmin <- min(afmh$ind$coord[, axes[1]],inter[, axes[1]])
xmax <- max(afmh$ind$coord[, axes[1]],inter[, axes[1]])
ymin <- min(afmh$ind$coord[, axes[2]],inter[, axes[2]])
ymax <- max(afmh$ind$coord[, axes[2]],inter[, axes[2]])
x <- c(xmin, xmax) * 1.1
y <- c(ymin, ymax) * 1.1
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y, xlim = x,ylim = y, col = "white",main="Individuals factor map", asp = 1)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
points(afmh$ind$coord[, axes], pch = 15,col=1:I)
text(afmh$ind$coord[, axes[1]],afmh$ind$coord[, axes[2]],col=1:I, rownames(afmh$ind$coord),pos = 3, offset = 0.2, cex = 0.8)
for (j in 1:J) {
points(afmh$partial[[2]][, axes, j], col=rep(1:I,times=J),pch = 20, cex = 0.8)
text(afmh$partial[[2]][, axes, j], col=rep(1:I,times=J),labels=rownames(afmh$group$coord[[2]])[j],pos=3,cex=0.5)
for (i in 1:nrow(afmh$partial[[2]]))
lines(c(afmh$ind$coord[i,axes[1]], afmh$partial[[2]][i, axes[1],j]), c(afmh$ind$coord[i, axes[2]], afmh$partial[[2]][i,axes[2], j]),col=i)
}
# Correlation circle for the napping (quantitative) variables
dev.new()
plot(0, 0, xlab = lab.x, ylab = lab.y,xlim = c(-1.1, 1.1), ylim = c(-1.1, 1.1),main="Correlation circle", col = "white",asp = 1)
x.cercle <- seq(-1, 1, by = 0.01)
y.cercle <- sqrt(1 - x.cercle^2)
lines(x.cercle, y = y.cercle)
lines(x.cercle, y = -y.cercle)
abline(v = 0, lty = 2)
abline(h = 0, lty = 2)
coord.var <- afmh$quanti.var$cor[, axes]
for (v in 1:nrow(coord.var)) {
arrows(0, 0, coord.var[v, 1], coord.var[v,2], length = 0.1, angle = 15, code = 2)
# Place each label on the side the arrow points to (pos: 1=below, 2=left,
# 3=above, 4=right).
if (abs(coord.var[v, 1]) > abs(coord.var[v,2])) {
if (coord.var[v, 1] >= 0)
pos <- 4
else pos <- 2
}
else {
if (coord.var[v, 2] >= 0)
pos <- 3
else pos <- 1
}
text(coord.var[v, 1], y = coord.var[v, 2],labels = rownames(coord.var)[v], pos = pos)
}
# Subjects (consumers) representation
coord.actif <- afmh$group$coord[[2]][, axes]
dev.new()
plot(coord.actif, xlab = lab.x, ylab = lab.y,xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2], main = "Subjects representation", asp = 1)
points(coord.actif[, 1], coord.actif[, 2], col = color[2],pch = 17)
text(coord.actif[, 1], y = coord.actif[, 2],labels = rownames(coord.actif), pos = 3, col = color[2])
# Methods representation (napping groups vs categorization groups)
coord.actif <- afmh$group$coord[[1]][, axes]
gp=rep(c("1","2"),(length(coord.actif[,1])/2))
dev.new()
plot(coord.actif, xlab = lab.x, ylab = lab.y,xlim = c(0, 1), ylim = c(0, 1), pch = 17, col = color[2],main = "Method's representation", asp = 1)
points(coord.actif[, 1], coord.actif[, 2], col = gp,pch = 17)
text(coord.actif[, 1], y = coord.actif[, 2],labels = rownames(coord.actif), pos = 3, col = gp)
}
####################### Confidence ellipses (bootstrap) ########################
if (graph){
boot (don,method="sortnapping", level.conf = 1-alpha,nbsim=B,ncp=ncp.boot)}
# Napping indicators
# Extract only the tablecloth (napping) coordinate columns
nappe_c=data.frame(matrix(NA,I,2*J))
for (i in 1:J){
nappe_c[,(2*(i-1)+1)]=don[,(3*(i-1)+1)]
colnames(nappe_c)[(2*(i-1)+1)]=colnames(don)[(3*(i-1)+1)]
nappe_c[,(2*i)]=don[,(3*(i-1)+2)]
colnames(nappe_c)[(2*i)]=colnames(don)[(3*(i-1)+2)]}
# Per-tablecloth summary: spread (sd, range) of the X/Y coordinates and a
# dimensionality index; returns min/median/max over consumers.
res.nappe=function(don){
I=nrow(don)
J=ncol(don)/2
X=Y=matrix(NA,I,J)
for (i in 1:J){
X[,i]=don[,(2*i-1)]
Y[,i]=don[,(2*i)]}
resultat=matrix(NA,5,3)
rownames(resultat)=c("St. dev. X","St. dev. Y","Range X","Range Y","Dimensionnality")
colnames(resultat)=c("min","median","max")
ecart.type.X=apply(X,2,sd)
ecart.type.Y=apply(Y,2,sd)
min.X=apply(X,2,min)
max.X=apply(X,2,max)
etendue.X=max.X-min.X
min.Y=apply(Y,2,min)
max.Y=apply(Y,2,max)
etendue.Y=max.Y-min.Y
res.pca=matrix(NA,2,J)
dimen=rep(NA,J)
# Dimensionality of each tablecloth derived from the two eigenvalues of
# the (uncentred-scale) PCA of its X/Y coordinates.
for (i in 1:J){
acp=PCA(don[,(2*(i-1)+1):(2*i)],graph=F,scale.unit=F)
res.pca[,i]=acp$eig[,1]
dimen[i]=1+(res.pca[2,i]^2/res.pca[1,i]^2)}
resultat[1,1]=min(ecart.type.X)
resultat[1,2]=median(ecart.type.X)
resultat[1,3]=max(ecart.type.X)
resultat[2,1]=min(ecart.type.Y)
resultat[2,2]=median(ecart.type.Y)
resultat[2,3]=max(ecart.type.Y)
resultat[3,1]=min(etendue.X)
resultat[3,2]=median(etendue.X)
resultat[3,3]=max(etendue.X)
resultat[4,1]=min(etendue.Y)
resultat[4,2]=median(etendue.Y)
resultat[4,3]=max(etendue.Y)
resultat[5,1]=min(dimen)
resultat[5,2]=median(dimen)
resultat[5,3]=max(dimen)
return(resultat)}
# Agreement indicators: inertia explained by the PCA of all X coordinates,
# of all Y coordinates, and of the first principal component of each
# individual tablecloth.
res.nappe2=function(don){
I=nrow(don)
J=ncol(don)/2
X=Y=matrix(NA,I,J)
for (i in 1:J){
X[,i]=don[,(2*i-1)]
Y[,i]=don[,(2*i)]}
resultat=matrix(NA,3,2)
rownames(resultat)=c("PCA X","PCA Y","PCA F1")
colnames(resultat)=c("% inertia dim 1","% inertia plane 1-2")
acp.X=PCA(X,scale.unit=T,graph=F)
acp.Y=PCA(Y,scale.unit=T,graph=F)
res.pca=matrix(NA,I,J)
for (i in 1:J){
acp=PCA(don[,(2*(i-1)+1):(2*i)],graph=F,scale.unit=F)
res.pca[,i]=acp$ind$coord[,1]}
acp.F1=PCA(res.pca,scale.unit=T,graph=F)
resultat[1,1]=acp.X$eig[1,2]
resultat[1,2]=acp.X$eig[2,3]
resultat[2,1]=acp.Y$eig[1,2]
resultat[2,2]=acp.Y$eig[2,3]
resultat[3,1]=acp.F1$eig[1,2]
resultat[3,2]=acp.F1$eig[2,3]
return(list(resultat,res.pca))}
res1_nappe=res.nappe(nappe_c)
res2_nappe=res.nappe2(nappe_c)
# Categorization indicators
catego_c=data.frame(matrix(NA,I,J))
for (i in 1:J){
catego_c[,i]=don[,(3*i)]
colnames(catego_c)[i]=colnames(don)[3*i]}
# Co-occurrence matrix of products (how often two products share a group),
# reordered along the first HMFA dimension.
ordre_prod=order(afmh$ind$coord[,1])
coocc=tab.disjonctif(catego_c)%*%t(tab.disjonctif(catego_c))
coocc_reord=coocc[ordre_prod,ordre_prod]
colnames(coocc_reord)=rownames(coocc_reord)=rownames(afmh$ind$coord)[ordre_prod]
# Number of products per group
lev=rep(NA,J)
for (i in 1:J){
lev[i]=length(levels(catego_c[,i]))}
nbp=strsplit(summary(catego_c,maxsum=max(lev)),":")
agg=rep(0,J*max(lev))
for (i in 1:(J*max(lev))){
agg[i]=nbp[[i]][2]}
agg2=na.omit(agg)
agg2=as.factor(agg2)
if (graph){
dev.new()
plot(agg2,main="Number of products per group",xlab=c("Number of products"),ylab=c("Frequency"))}
# Number of groups formed by each consumer
lev2=as.factor(lev)
if (graph){
dev.new()
plot(lev2,main="Number of groups provided during sorting task",xlab=c("Number of groups"),ylab=c("Frequency"))}
## Textual analysis: one row per (product, consumer) pair holding the group
## label the consumer used for that product.
texte=matrix(NA,(I*J),3)
texte=data.frame(texte)
texte[,1]=rep(rownames(don),J)
texte[,2]=rep(colnames(catego_c),each=I)
for (i in 1:J){
texte[((I*(i-1))+1):(I*i),3]=paste(catego_c[,i])}
restext=textual(texte,3,1,sep.word=sep.word)
# Remove the arbitrary labels g1, ..., g99 (note: textual() lower-cases
# every word).
mod.suppr=paste("g",1:99,sep="")
mod.suppr=intersect(colnames(restext$cont.table),mod.suppr)
if (length(mod.suppr)!=0){
num.mod.suppr=which(colnames(restext$cont.table)%in%mod.suppr)
restext$cont.table=restext$cont.table[,-num.mod.suppr]
num.mod.suppr2=which(rownames(restext$nb.words)%in%mod.suppr)
restext$nb.words=restext$nb.words[-num.mod.suppr2,] }
# Number of distinct words
nb_mot_diff=nrow(restext$nb.words)
cat("Number of different words : ",nb_mot_diff,"\n")
# Number of words per group
mots=rep(NA,sum(lev))
grp=0
for (i in 1:J){
mots[(grp+1):(grp+lev[i])]=levels(catego_c[,i])
grp=grp+lev[i]}
mots_split=strsplit(mots,split=sep.word)
nb_mots=rep(NA,length(mots_split))
for (i in 1:length(mots_split)){
if (mots_split[[i]][1] %in% paste("G",1:99,sep="")){
nb_mots[i]=0}
else {
nb_mots[i]=length(mots_split[[i]])}}
nb_mots2=as.factor(nb_mots)
if (graph){
dev.new()
plot(nb_mots2,main="Number of words per group")}
# Drop words used at most 'word.min' times, then characterize the products
# by their over/under-represented words.
freq_min=which(apply(restext$cont.table,2,sum)<=word.min)
if (length(freq_min)!=0){
restext$cont.table=restext$cont.table[,-freq_min]}
caract_prod=descfreq(restext$cont.table)
########################### Assemble the returned object ######################
indicator=list(catego=coocc_reord,napping=list(res1_nappe,res2_nappe[[1]]))
call=list(hmfa=afmh,X=don_input)
ind=list(coord=afmh$ind$coord,cos2=afmh$ind$cos2,contrib=afmh$ind$contrib,partial=afmh$partial)
res = list(eig=afmh$eig,ind=ind,quali.var=afmh$quali.var,quanti.var=afmh$quanti.var,group=afmh$group,indicator=indicator,textual=caract_prod,call=call)
# NOTE(review): "list " has a trailing space — likely a typo for "list";
# kept as-is to preserve behavior.
class(res) <- c("fasnt", "list ")
return(res)
}
|
# Plot 4: total US PM2.5 emissions from coal-related sources, by type and year.
# (The original header used "//", which is not a valid R comment and would be
# a syntax error when sourced.)
# SCC codes whose short name mentions "coal" (case-insensitive).
SCCcoal <- SC
C[grepl("coal", SCC$Short.Name, ignore.case = TRUE),]
# Keep only NEI observations recorded under those coal-related codes.
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC,]
# Total emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)
ggplot(totalCoal, aes(year, Emissions, col = type)) +
  geom_line() +
  geom_point() +
  ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
  xlab("Year") +
  ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
  scale_colour_discrete(name = "Type of sources") +
  theme(legend.title = element_text(face = "bold"))
| /Proj2Plot4.R | no_license | KeerthanaJ1998/datasciencecoursera | R | false | false | 552 | r | // Plot 4
# Plot 4: total US PM2.5 emissions from coal-related sources, by type and year.
# SCC codes whose short name mentions "coal" (case-insensitive).
SCCcoal <- SCC[grepl("coal", SCC$Short.Name, ignore.case = TRUE),]
# Keep only NEI observations recorded under those coal-related codes.
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC,]
# Total emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)
ggplot(totalCoal, aes(year, Emissions, col = type)) +
  geom_line() +
  geom_point() +
  ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
  xlab("Year") +
  ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
  scale_colour_discrete(name = "Type of sources") +
  theme(legend.title = element_text(face = "bold"))
|
# Shiny app: upload a CSV of well-plate measurements, choose wells, fields and
# parameters, then view either per-parameter time-series plots or mean/SEM
# bar charts, plus the filtered data table.
library(tidyverse) # this includes dplyr for data wrangling, ggplot2 for plotting, and tidyr for reshaping data
library(shiny)
library(plotrix) # for standard error function
#library(DT)
# Globals shared between server callbacks.
# NOTE(review): df starts as a scalar NA and is later replaced by a data frame
# via <<-. Once df is a data frame, `!is.na(df)` returns a matrix, and a
# length > 1 `if` condition errors on R >= 4.2 — consider is.data.frame(df).
df<-NA
aa<-FALSE
#shiny
if (interactive()) {
  ui <- fluidPage(titlePanel("Ben Lab"),
                  sidebarLayout(
                    sidebarPanel(
                      #file upload
                      fileInput("file1", "Import CSV File",
                                accept = c(
                                  "text/csv",
                                  "text/comma-separated-values,text/plain",
                                  ".csv"),
                      ),
                      #selector
                      checkboxInput("checkOne", label = "Time Series", value = FALSE),
                      selectInput("wells", "Choose a well:",
                                  "",multiple = TRUE),
                      selectInput("para","Choose parameters:",
                                  "",multiple = TRUE),
                      # uiOutput("selecters"),
                      uiOutput("loc"), # placeholder for time series selector if time series data selected
                      selectInput("field", "Choose a field:",
                                  NULL,multiple = FALSE),
                      #checkboxGroupInput("calculation", label = "Calculation",choices = c("Mean","SEM"))
                      uiOutput("calculation"),
                      actionButton('plot_button','Plot')
                    ),
                    mainPanel(
                      #tabset for plot, summary and table
                      tabsetPanel(
                        tabPanel("Plot", plotOutput("plot"),downloadButton('Save', 'save')),
                        tabPanel("Table", dataTableOutput("table")),
                        tabPanel("Summary", textOutput("summary"))
                      )
                    )
                  )
  )
  server <- function(input, output,session) {
    # On file upload: read the CSV, populate the well/field/parameter
    # selectors from its columns, and build the time-point selector
    # (column 3 is assumed to be the time point — TODO confirm).
    observeEvent(input$file1,{
      df<<- read_csv(input$file1$datapath)
      output$loc<-renderUI({
        selectInput("loc", label = "Choose time point",
                    choices = unique(df[,3]) ,selected = 1
        )
      })
      #updateSelectInput(session, 'time point', choices = list(1,2,3,4,5) )
      updateSelectInput(session,"wells",choices = unique(df[,1]))
      updateSelectInput(session,"field",choices = unique(df[,2]))
      updateSelectInput(session,"para",choices = colnames(df))
      # time points selection: need soft selection later
      #unique(select(df, `Time Point`)))
    })
    # Toggle between time-series mode (checkbox on: hide the calculation and
    # time-point controls) and summary mode (checkbox off: show them).
    observeEvent(input$checkOne,{
      # select time series data
      if (input$checkOne == TRUE){
        # render blank
        output$calculation <- renderUI({
        })
        output$loc<-renderUI({
        })
        output$table <- renderDataTable({
          #import file
          #filter the dataset based on parameter and wells users interested in
          # NOTE(review): `data` is not defined in this scope (base R's
          # utils::data would be found instead) — presumably the uploaded df
          # or the filtered table was intended; verify.
          data
        })
      }else{
        if(!is.na(df)){
          output$loc<-renderUI({
            selectInput("loc", label = "Choose time point",
                        choices = unique(df[,3]) ,selected = 1
            )
          })
        }
        output$calculation <- renderUI({
          checkboxGroupInput("calculation", label = "Calculation",choices = c("Mean","SEM"))
        })
      }
    })
    # Plot button: filter to the chosen wells/field/parameters and draw one
    # smoothed time-series panel per selected parameter.
    observeEvent(input$plot_button,{
      if(!is.na(df) && input$checkOne){
        dat <- df %>%
          filter(`Well Name` %in% input$wells, `Field Number` %in% input$field) %>%
          select(one_of(input$para),`Time Point`)
        # Plotting time series
        output$plot <- renderPlot({
          # for( i in seq(1,length(input$para))) {
          #  ggplot(data=dat) +
          #  geom_smooth(aes(x=`Time Point`,y=input$para[i]))
          # }
          # aa <- input$para[1]
          # ggplot(data=dat,aes(x=`Time Point`)) +
          #  geom_smooth(aes(y=aa))
          dat %>%
            gather(variable,value,-`Time Point`) %>%
            ggplot(aes(`Time Point`, value)) +
            geom_point() +
            geom_smooth()+
            facet_wrap(~variable)
        })
      }
      # NOTE(review): `dat` only exists when the branch above ran; these two
      # outputs will error otherwise — confirm intended behavior.
      output$table <- renderDataTable({
        #import file
        #filter the dataset based on parameter and wells users interested in
        dat
      })
      output$summary <- renderText({
        summary(dat)
      })
    })
    # output$table <- renderDataTable({
    #  #import file
    #  #filter the dataset based on parameter and wells users interested in
    #  table()
    # })
    # Reactive table of the selected parameters; falls back to the full CSV
    # when no well is selected.
    # NOTE(review): 'Well Name' in the filter is a character literal, so the
    # condition tests "Well Name" %in% input$wells rather than the column —
    # backticks (`Well Name`) were likely intended; verify.
    table <- reactive({
      df<-read_csv(input$file1$datapath)
      if(is.null(input$wells)){
        return(df)
      }
      else{df %>%
          select(input$para,`Well Name`,`Time Point`)%>%
          filter('Well Name' %in% input$wells)}
    })
    # Draw mean or SEM bars per well/time point when a calculation is chosen.
    # NOTE(review): checkboxGroupInput can return both values at once; the
    # `==` comparisons below only handle a single selection — confirm. Also
    # the Mean branch gathers `Well Name` while the SEM branch excludes it
    # (-`Well Name`), which looks inconsistent — verify against intent.
    observeEvent(input$calculation,{
      if(input$calculation =="Mean"){
        df_cal <- table()%>%
          group_by(`Well Name`,`Time Point`)%>%
          summarize_all(list(~mean(.)))%>%
          as.data.frame()%>%
          gather(key = "variable", value = "value", `Well Name`, -`Time Point`)
        output$plot <- renderPlot({
          df_cal%>%
            ggplot() +
            #making bars from only means
            geom_bar(aes(x = `Time Point`, y = value, fill = variable),
                     stat = "identity")
        })
      }
      else if(input$calculation =="SEM"){
        df_cal <- table()%>%
          group_by(`Well Name`,`Time Point`)%>%
          summarize_all(list(~std.error(.)))%>%
          as.data.frame()%>%
          gather(key = "variable", value = "value", -`Well Name`, -`Time Point`)
        output$plot <- renderPlot({
          df_cal%>%
            ggplot() +
            #making bars from only SEMs
            geom_bar(aes(x = `Time Point`, y = value, fill = variable),
                     stat = "identity")
        })
      }
    })
  }
}
shinyApp(ui, server)
| /part1/app.R | no_license | WeiqiPeng0/CrokerLabSummer2019 | R | false | false | 6,291 | r |
# Shiny app for exploring well-plate imaging data: the user uploads a CSV,
# chooses wells / fields / parameters, and views the data as time-series
# plots, tables and per-well summary statistics (mean / SEM bar charts).
library(tidyverse) # dplyr for wrangling, ggplot2 for plotting, tidyr for reshaping
library(shiny)
library(plotrix)   # std.error() used for the SEM summaries
#library(DT)

# Uploaded data set, shared across observers; NULL until a CSV is imported.
df <- NULL

#shiny
if (interactive()) {

  ui <- fluidPage(
    titlePanel("Ben Lab"),
    sidebarLayout(
      sidebarPanel(
        # file upload
        fileInput("file1", "Import CSV File",
                  accept = c("text/csv",
                             "text/comma-separated-values,text/plain",
                             ".csv")),
        # selectors
        checkboxInput("checkOne", label = "Time Series", value = FALSE),
        selectInput("wells", "Choose a well:", "", multiple = TRUE),
        selectInput("para", "Choose parameters:", "", multiple = TRUE),
        # uiOutput("selecters"),
        uiOutput("loc"),  # time-point selector (only shown outside time-series mode)
        selectInput("field", "Choose a field:", NULL, multiple = FALSE),
        uiOutput("calculation"),
        actionButton("plot_button", "Plot")
      ),
      mainPanel(
        # tabset for plot, summary and table
        tabsetPanel(
          tabPanel("Plot", plotOutput("plot"), downloadButton("Save", "save")),
          tabPanel("Table", dataTableOutput("table")),
          tabPanel("Summary", textOutput("summary"))
        )
      )
    )
  )

  server <- function(input, output, session) {

    # Populate the selectors from the uploaded file's wells, fields and columns.
    observeEvent(input$file1, {
      df <<- read_csv(input$file1$datapath)
      output$loc <- renderUI({
        selectInput("loc", label = "Choose time point",
                    choices = unique(df[, 3]), selected = 1)
      })
      #updateSelectInput(session, 'time point', choices = list(1,2,3,4,5) )
      updateSelectInput(session, "wells", choices = unique(df[, 1]))
      updateSelectInput(session, "field", choices = unique(df[, 2]))
      updateSelectInput(session, "para", choices = colnames(df))
    })

    # Toggle between time-series mode and single-time-point mode.
    observeEvent(input$checkOne, {
      if (isTRUE(input$checkOne)) {
        # Time-series mode: hide the calculation / time-point controls.
        output$calculation <- renderUI({})
        output$loc <- renderUI({})
        output$table <- renderDataTable({
          # Show the full uploaded dataset.  (The original referenced the
          # undefined object `data`, which resolved to base R's data().)
          if (is.data.frame(df)) df else NULL
        })
      } else {
        # was !is.na(df): is.na() on a data frame returns a matrix, which is
        # not a valid scalar condition for if().
        if (is.data.frame(df)) {
          output$loc <- renderUI({
            selectInput("loc", label = "Choose time point",
                        choices = unique(df[, 3]), selected = 1)
          })
        }
        output$calculation <- renderUI({
          checkboxGroupInput("calculation", label = "Calculation",
                             choices = c("Mean", "SEM"))
        })
      }
    })

    # Render the time-series plot / table / summary for the chosen subset.
    observeEvent(input$plot_button, {
      if (is.data.frame(df) && isTRUE(input$checkOne)) {
        dat <- df %>%
          filter(`Well Name` %in% input$wells, `Field Number` %in% input$field) %>%
          select(one_of(input$para), `Time Point`)
        output$plot <- renderPlot({
          # One smoothed scatter panel per selected parameter.
          dat %>%
            gather(variable, value, -`Time Point`) %>%
            ggplot(aes(`Time Point`, value)) +
            geom_point() +
            geom_smooth() +
            facet_wrap(~variable)
        })
        # Defined inside the guard so `dat` is guaranteed to exist when these
        # outputs run (the original defined them unconditionally and errored
        # when the guard was false).
        output$table <- renderDataTable({
          dat
        })
        output$summary <- renderText({
          summary(dat)
        })
      }
    })

    # Subset of the uploaded data restricted to the selected parameters/wells.
    # (Shadows base::table within the server scope, as in the original.)
    table <- reactive({
      df <- read_csv(input$file1$datapath)
      if (is.null(input$wells)) {
        return(df)
      }
      df %>%
        select(input$para, `Well Name`, `Time Point`) %>%
        # fixed: the original filtered on the string constant 'Well Name',
        # which compared a literal string (not the column) against the wells
        filter(`Well Name` %in% input$wells)
    })

    # Bar chart of per-well mean or standard error at each time point.
    observeEvent(input$calculation, {
      # %in% rather than ==: checkboxGroupInput may return several values.
      if ("Mean" %in% input$calculation) {
        df_cal <- table() %>%
          group_by(`Well Name`, `Time Point`) %>%
          summarize_all(list(~mean(.))) %>%
          as.data.frame() %>%
          # fixed: "-" was missing before `Well Name`, so the key column was
          # gathered instead of excluded (SEM branch already had it right)
          gather(key = "variable", value = "value", -`Well Name`, -`Time Point`)
        output$plot <- renderPlot({
          df_cal %>%
            ggplot() +
            # making bars from only means
            geom_bar(aes(x = `Time Point`, y = value, fill = variable),
                     stat = "identity")
        })
      } else if ("SEM" %in% input$calculation) {
        df_cal <- table() %>%
          group_by(`Well Name`, `Time Point`) %>%
          summarize_all(list(~std.error(.))) %>%
          as.data.frame() %>%
          gather(key = "variable", value = "value", -`Well Name`, -`Time Point`)
        output$plot <- renderPlot({
          df_cal %>%
            ggplot() +
            # making bars from only standard errors
            geom_bar(aes(x = `Time Point`, y = value, fill = variable),
                     stat = "identity")
        })
      }
    })
  }
}
shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resDesign.R
\docType{data}
\name{resDesign}
\alias{resDesign}
\title{Simulation results for comparison of experimental designs}
\format{Includes the heritability, the design used, the model used (models with square brackets use genotypic information), the number of varieties, the number of observations, the Kendall metric and the top20 metric}
\usage{
data(resDesign)
}
\description{
In this simulation,
we only include LM and BT with genotypic information.
We run the procedure 100 times with h^2 = 0.2, 0.5, 0.8 with m = 240 and n = 180, 240, 300, 360.
Seventeen experimental designs are included, namely AP, RD, GD1~GD9, KM1~KM6. Please see the vignette for a detailed
description of each experimental design.
}
\examples{
data(resDesign)
summary(resDesign)
}
\keyword{datasets}
| /man/resDesign.Rd | no_license | shuxiaoc/agRank | R | false | true | 854 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resDesign.R
\docType{data}
\name{resDesign}
\alias{resDesign}
\title{Simulation results for comparison of experimental designs}
\format{Includes the heritability, the design used, the model used (models with square brackets use genotypic information), the number of varieties, the number of observations, the Kendall metric and the top20 metric}
\usage{
data(resDesign)
}
\description{
In this simulation,
we only include LM and BT with genotypic information.
We run the procedure 100 times with h^2 = 0.2, 0.5, 0.8 with m = 240 and n = 180, 240, 300, 360.
Seventeen experimental designs are included, namely AP, RD, GD1~GD9, KM1~KM6. Please see the vignette for a detailed
description of each experimental design.
}
\examples{
data(resDesign)
summary(resDesign)
}
\keyword{datasets}
|
###################################################################
# MAIN SCRIPT PLOT 2
###################################################################
#load function getdata.R and get data
source ("getdata.R")
df<-getdata()
# Start the png device
png(filename = "plot2.png",width = 480, height = 480, bg = "white")
# Generate graphic
Sys.setlocale(category = "LC_ALL", locale = "english")
plot(df$datetime, df$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power (kilowatts)")
# Close the device
dev.off() | /plot2.R | no_license | isabelmorenom/ExData_Plotting1 | R | false | false | 564 | r | ###################################################################
# MAIN SCRIPT PLOT 2
###################################################################
#load function getdata.R and get data
source ("getdata.R")
df<-getdata()
# Start the png device
png(filename = "plot2.png",width = 480, height = 480, bg = "white")
# Generate graphic
Sys.setlocale(category = "LC_ALL", locale = "english")
plot(df$datetime, df$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power (kilowatts)")
# Close the device
dev.off() |
devtools::document()
res <- devtools::check(".", error_on = "warning")
str(res)
| /examples/rcmdcheck/scratch.R | no_license | INWT/r-docker | R | false | false | 81 | r |
devtools::document()
res <- devtools::check(".", error_on = "warning")
str(res)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_mave.R
\name{predict.mave}
\alias{predict.mave}
\alias{predict.mave.dim}
\title{Make predictions based on the dimension reduction space}
\usage{
\method{predict}{mave}(object, newx, dim, ...)
\method{predict}{mave.dim}(object, newx, dim = "dim.min", ...)
}
\arguments{
\item{object}{the object of class 'mave'}
\item{newx}{Matrix of the new data to be predicted}
\item{dim}{the dimension of central space or central mean space. The matrix of the original data will be
multiplied by the matrix of dimension reduction directions of given dimension. Then the prediction will be
made based on the data of given dimensions. The value of dim should be given when the class of the
argument dr is mave. When the class of the argument dr is mave.dim and dim is not given, the
function will return the basis matrix of CS or CMS of dimension selected by \code{\link{mave.dim}}}
\item{...}{further arguments passed to \code{\link{mars}} function such as degree.}
}
\value{
the predicted response for the new data
}
\description{
This method makes predictions based on the reduced dimension of the data using the \code{\link{mars}} function.
}
\examples{
X = matrix(rnorm(10000),1000,10)
beta1 = as.matrix(c(1,1,1,1,0,0,0,0,0,0))
beta2 = as.matrix(c(0,0,0,1,1,1,1,1,0,0))
err = as.matrix(rnorm(1000))
Y = X\%*\%beta1+X\%*\%beta2+err
train = sample(1:1000)[1:500]
x.train = X[train,]
y.train = as.matrix(Y[train])
x.test = X[-train,]
y.test = as.matrix(Y[-train])
dr = mave(y.train~x.train, method = 'meanopg')
yp = predict(dr,x.test,dim=3,degree=2)
#mean error
mean((yp-y.test)^2)
dr.dim = mave.dim(dr)
yp = predict(dr.dim,x.test,degree=2)
#mean error
mean((yp-y.test)^2)
}
\seealso{
\code{\link{mave}} for computing the dimension reduction space and \code{\link{mave.dim}} for
estimating the dimension of the dimension reduction space
}
| /MAVE/man/predict.mave.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,908 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_mave.R
\name{predict.mave}
\alias{predict.mave}
\alias{predict.mave.dim}
\title{Make predictions based on the dimension reduction space}
\usage{
\method{predict}{mave}(object, newx, dim, ...)
\method{predict}{mave.dim}(object, newx, dim = "dim.min", ...)
}
\arguments{
\item{object}{the object of class 'mave'}
\item{newx}{Matrix of the new data to be predicted}
\item{dim}{the dimension of central space or central mean space. The matrix of the original data will be
multiplied by the matrix of dimension reduction directions of given dimension. Then the prediction will be
made based on the data of given dimensions. The value of dim should be given when the class of the
argument dr is mave. When the class of the argument dr is mave.dim and dim is not given, the
function will return the basis matrix of CS or CMS of dimension selected by \code{\link{mave.dim}}}
\item{...}{further arguments passed to \code{\link{mars}} function such as degree.}
}
\value{
the predicted response for the new data
}
\description{
This method makes predictions based on the reduced dimension of the data using the \code{\link{mars}} function.
}
\examples{
X = matrix(rnorm(10000),1000,10)
beta1 = as.matrix(c(1,1,1,1,0,0,0,0,0,0))
beta2 = as.matrix(c(0,0,0,1,1,1,1,1,0,0))
err = as.matrix(rnorm(1000))
Y = X\%*\%beta1+X\%*\%beta2+err
train = sample(1:1000)[1:500]
x.train = X[train,]
y.train = as.matrix(Y[train])
x.test = X[-train,]
y.test = as.matrix(Y[-train])
dr = mave(y.train~x.train, method = 'meanopg')
yp = predict(dr,x.test,dim=3,degree=2)
#mean error
mean((yp-y.test)^2)
dr.dim = mave.dim(dr)
yp = predict(dr.dim,x.test,degree=2)
#mean error
mean((yp-y.test)^2)
}
\seealso{
\code{\link{mave}} for computing the dimension reduction space and \code{\link{mave.dim}} for
estimating the dimension of the dimension reduction space
}
|
# INTERACTIVE FIGURES ##########################################################
# NOTE(review): this script assumes the objects bias_gfwed, bias_erai,
# climate_map, breaks_bias, pal_bias, pal and df_gfwed were created earlier
# in the analysis pipeline (they are not defined in this file) -- run it
# only after those steps.
library(leaflet)
# Bias GFWED
library("mapview")
# Quick-look views of the GFWED bias raster alongside the climate map.
mapview::mapview(c(bias_gfwed, climate_map))
mapview::mapview(bias_gfwed, at = breaks_bias, col.regions = pal_bias,
                 alpha.regions = 0.5, map.types = mapviewGetOption("OpenTopoMap"))
# Semi-transparent GFWED bias raster over two switchable base maps,
# with a layers control and a mini overview map.
leaflet() %>%
  addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
  addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
  addRasterImage(bias_gfwed,
                 colors = colorBin(palette = pal_bias,
                                   bins = breaks_bias,
                                   na.color = "transparent"),
                 opacity = 0.5, group = "mean_bias_gfwed") %>%
  addRasterImage(climate_map,
                 group = "climate_map") %>%
  addLayersControl(
    baseGroups = c("CartoDB (default)", "OpenTopoMap"),
    overlayGroups = c("mean_bias_gfwed", "climate_map"),
    options = layersControlOptions(collapsed = FALSE)
  ) %>%
  addMiniMap()
# Bias ERAI
# Same layout as above for the ERA-Interim bias raster, plus a legend.
leaflet() %>%
  addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
  addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
  addRasterImage(bias_erai,
                 colors = colorBin(palette = pal_bias,
                                   bins = breaks_bias,
                                   na.color = "transparent"),
                 opacity = 0.5, group = "mean_bias_erai") %>%
  addRasterImage(climate_map,
                 group = "climate_map") %>%
  addLayersControl(
    baseGroups = c("CartoDB (default)", "OpenTopoMap"),
    overlayGroups = c("mean_bias_erai", "climate_map"),
    options = layersControlOptions(collapsed = FALSE)
  ) %>%
  addMiniMap() %>%
  addLegend(pal = pal_bias,
            values = values(bias_erai),
            title = "Mean bias ERA5 vs ERAI")
# Check locations on interactive map
# Validation points coloured by bias, fill opacity scaled by the anomaly
# correlation, and a popup reporting both statistics for each point.
m <- leaflet(data = df_gfwed) %>%
  # Base groups
  addTiles() %>%
  #addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
  #addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
  addCircleMarkers(~x, ~y,
                   color = ~pal(bias),
                   fillOpacity = ~abs(1 - ac),
                   stroke = FALSE,
                   popup = ~paste("<strong>", "Bias:", "</strong>",
                                  bias, "<br>",
                                  "<strong>", "Anomaly correlation:",
                                  "</strong>", ac, "<br>"),
                   group = "Validation points") %>%
  setMaxBounds(lng1 = -180, lat1 = -90, lng2 = 180, lat2 = 90) %>%
  addLegend(position = "topright",
            pal = pal,
            title = "GEFF-ERA5 vs OBS",
            # NOTE(review): `color` (legend values) and `labels` (used in
            # labFormat below) are not defined in this file -- confirm they
            # exist in df_gfwed / the calling environment before running.
            values = ~color,
            opacity = 1,
            labFormat = function(type, cuts, p) { # Here's the trick
              paste0(labels)
            }
  ) %>%
  # Layers control
  addLayersControl(
    baseGroups = c("CartoDB (default)", "OpenTopoMap"),
    overlayGroups = "Validation points",
    options = layersControlOptions(collapsed = FALSE)
  )
# PUBLISH ON RPUBS THE INTERACTIVE MAP, THEN TAKE A SCREENSHOT FOR THE PAPER
saveWidget(m, file = "GEFF-ERA5_2017_diagnostic_map.html", selfcontained = TRUE) | /scripts/interactive_maps.R | permissive | sunyong2088/paper_geff_era5 | R | false | false | 3,276 | r | # INTERACTIVE FIGURES ##########################################################
library(leaflet)
# Bias GFWED
library("mapview")
mapview::mapview(c(bias_gfwed, climate_map))
mapview::mapview(bias_gfwed, at = breaks_bias, col.regions = pal_bias,
alpha.regions = 0.5, map.types = mapviewGetOption("OpenTopoMap"))
leaflet() %>%
addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
addRasterImage(bias_gfwed,
colors = colorBin(palette = pal_bias,
bins = breaks_bias,
na.color = "transparent"),
opacity = 0.5, group = "mean_bias_gfwed") %>%
addRasterImage(climate_map,
group = "climate_map") %>%
addLayersControl(
baseGroups = c("CartoDB (default)", "OpenTopoMap"),
overlayGroups = c("mean_bias_gfwed", "climate_map"),
options = layersControlOptions(collapsed = FALSE)
) %>%
addMiniMap()
# Bias ERAI
leaflet() %>%
addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
addRasterImage(bias_erai,
colors = colorBin(palette = pal_bias,
bins = breaks_bias,
na.color = "transparent"),
opacity = 0.5, group = "mean_bias_erai") %>%
addRasterImage(climate_map,
group = "climate_map") %>%
addLayersControl(
baseGroups = c("CartoDB (default)", "OpenTopoMap"),
overlayGroups = c("mean_bias_erai", "climate_map"),
options = layersControlOptions(collapsed = FALSE)
) %>%
addMiniMap() %>%
addLegend(pal = pal_bias,
values = values(bias_erai),
title = "Mean bias ERA5 vs ERAI")
# Check locations on interactive map
m <- leaflet(data = df_gfwed) %>%
# Base groups
addTiles() %>%
#addProviderTiles(providers$CartoDB.Positron, group = "CartoDB (default)") %>%
#addProviderTiles(providers$OpenTopoMap, group = "OpenTopoMap") %>%
addCircleMarkers(~x, ~y,
color = ~pal(bias),
fillOpacity = ~abs(1 - ac),
stroke = FALSE,
popup = ~paste("<strong>", "Bias:", "</strong>",
bias, "<br>",
"<strong>", "Anomaly correlation:",
"</strong>", ac, "<br>"),
group = "Validation points") %>%
setMaxBounds(lng1 = -180, lat1 = -90, lng2 = 180, lat2 = 90) %>%
addLegend(position = "topright",
pal = pal,
title = "GEFF-ERA5 vs OBS",
values = ~color,
opacity = 1,
labFormat = function(type, cuts, p) { # Here's the trick
paste0(labels)
}
) %>%
# Layers control
addLayersControl(
baseGroups = c("CartoDB (default)", "OpenTopoMap"),
overlayGroups = "Validation points",
options = layersControlOptions(collapsed = FALSE)
)
# PUBLISH ON RPUBS THE INTERACTIVE MAP, THEN TAKE A SCREENSHOT FOR THE PAPER
saveWidget(m, file = "GEFF-ERA5_2017_diagnostic_map.html", selfcontained = TRUE) |
\name{getScores}
\alias{getScores}
\title{
Get CHiCAGO scores.
}
\description{
Converts p-values into a CHiCAGO score, using p-value weighting.
}
\usage{
getScores(cd, method = "weightedRelative",
includeTrans = TRUE, plot = TRUE, outfile = NULL)
}
\arguments{
\item{cd}{A \code{chicagoData} object.}
\item{method}{
Either "weightedRelative" (recommended), or "unweighted".
}
\item{includeTrans}{
If \code{FALSE}, trans interactions are discounted.
}
\item{plot}{
Plot a diagnostic plot.
}
\item{outfile}{
A string containing a .pdf file location to write to.
}
}
\section{Warning}{
The object \code{intData(cd)} is updated by reference. Thus, \code{intData(cd)} will be altered. See vignette for further information.
}
\details{
Weighting is performed using the parameters \code{weightAlpha}, \code{weightBeta}, \code{weightGamma}, \code{weightDelta}. Briefly, this function calculates weights \code{w} that decrease with increasing distance. Then, we construct weighted p-values \code{p/w}. As a result, the significance of long-range interactions is upweighted, and the significance of short-range interactions is downweighted.
Finally, the output score is calculated as \code{-log(p/w) - log(w_max)}, where \code{w_max} is the highest attainable weight, and provided the score is positive (otherwise it is set to 0).
Please see the CHiCAGO paper and its supplementary for full details.
}
\value{
An object of class \code{chicagoData}.
}
\references{
Genovese, C. R., Roeder, K., and Wasserman, L. (2006). False discovery control with p-value weighting. Biometrika, 93, 509-524. doi:10.1093/biomet/93.3.509
}
\author{
Jonathan Cairns
}
\seealso{
\code{\link{chicagoPipeline}}
}
\examples{
data(cdUnitTest)
##modifications to cdUnitTest, ensuring it uses correct design directory
designDir <- file.path(system.file("extdata", package="Chicago"), "unitTestDesign")
cdUnitTest <- modifySettings(cd=cdUnitTest, designDir=designDir)
cdUnitTest <- getScores(cdUnitTest)
}
| /Chicago/man/getScores.Rd | no_license | dovetail-genomics/chicago | R | false | false | 1,999 | rd | \name{getScores}
\alias{getScores}
\title{
Get CHiCAGO scores.
}
\description{
Converts p-values into a CHiCAGO score, using p-value weighting.
}
\usage{
getScores(cd, method = "weightedRelative",
includeTrans = TRUE, plot = TRUE, outfile = NULL)
}
\arguments{
\item{cd}{A \code{chicagoData} object.}
\item{method}{
Either "weightedRelative" (recommended), or "unweighted".
}
\item{includeTrans}{
If \code{FALSE}, trans interactions are discounted.
}
\item{plot}{
Plot a diagnostic plot.
}
\item{outfile}{
A string containing a .pdf file location to write to.
}
}
\section{Warning}{
The object \code{intData(cd)} is updated by reference. Thus, \code{intData(cd)} will be altered. See vignette for further information.
}
\details{
Weighting is performed using the parameters \code{weightAlpha}, \code{weightBeta}, \code{weightGamma}, \code{weightDelta}. Briefly, this function calculates weights \code{w} that decrease with increasing distance. Then, we construct weighted p-values \code{p/w}. As a result, the significance of long-range interactions is upweighted, and the significance of short-range interactions is downweighted.
Finally, the output score is calculated as \code{-log(p/w) - log(w_max)}, where \code{w_max} is the highest attainable weight, and provided the score is positive (otherwise it is set to 0).
Please see the CHiCAGO paper and its supplementary for full details.
}
\value{
An object of class \code{chicagoData}.
}
\references{
Genovese, C. R., Roeder, K., and Wasserman, L. (2006). False discovery control with p-value weighting. Biometrika, 93, 509-524. doi:10.1093/biomet/93.3.509
}
\author{
Jonathan Cairns
}
\seealso{
\code{\link{chicagoPipeline}}
}
\examples{
data(cdUnitTest)
##modifications to cdUnitTest, ensuring it uses correct design directory
designDir <- file.path(system.file("extdata", package="Chicago"), "unitTestDesign")
cdUnitTest <- modifySettings(cd=cdUnitTest, designDir=designDir)
cdUnitTest <- getScores(cdUnitTest)
}
|
#' Team Information
#'
#' Team information from team id.
#'
#' A list will be returned that contains three elements. The content, the url and the response
#' received from the api.
#'
#' The content element of the list contains the teams list of which each element is a team. For each
#' team different information is provided. Usually the following are included:
#'
#' \itemize{
#' \item \strong{name:} Team's name.
#' \item \strong{tag:} The team's tag.
#' \item \strong{time_created:} Unix timestamp of when the team was created.
#' \item \strong{calibration_games_remaining: :} Undocumented (possibly number of games until
#' a ranking score can be decided).
#' \item \strong{logo:} The UGC id for the team logo.
#' \item \strong{logo_sponsor:} The UGC id for the team sponsor logo.
#' \item \strong{country_code:} The team's ISO 3166-1 country-code.
#' \item \strong{url:} Team's url which they provided.
#' \item \strong{games_played:} Number of games played.
#' \item \strong{player_*_account_id:} Player's account id. Will be as many columns as players.
#' \item \strong{admin_account_id:} Team's admin id.
#' \item \strong{league_id_*:} Undocumented (Probably leagues they participated in). Will be as
#' many columns as leagues.
#' \item \strong{series_type:} Series type.
#' \item \strong{league_series_id:} The league series id.
#' \item \strong{league_game_id:} The league game id.
#' \item \strong{stage_name:} The name of the stage.
#' \item \strong{league_tier:} League tier.
#' \item \strong{scoreboard:} A huge list containing scoreboard information.
#' }
#'
#' @param start_at_team_id (optional) Team id to start returning results from .
#'
#' @param teams_requested (optional) The number of teams to return.
#'
#' @return A dota_api object containing the elements described in the details.
#'
#' @inheritParams get_response
#' @inheritParams get_event_stats_for_account
#'
#' @section Steam API Documentation:
#' \url{https://wiki.teamfortress.com/wiki/WebAPI/GetTeamInfoByTeamID}
#'
#' @examples
#' \dontrun{
#' get_team_info_by_team_id()
#' get_team_info_by_team_id(teams_requested = 10)
#' get_team_info_by_team_id(language = 'en', key = NULL)
#' get_team_info_by_team_id(language = 'en', key = 'xxxxxxxxxxx')
#' }
#'
#' @export
get_team_info_by_team_id <- function(start_at_team_id = NULL,
                                     teams_requested = NULL,
                                     dota_id = 570,
                                     language = 'en',
                                     key = NULL) {

  # Validate teams_requested: it must coerce to a non-negative number.
  # Fixes two defects in the original check: the error message referred to
  # "matches_requested" (a different function's argument), and stop() was
  # given `.call = FALSE`, which is not an argument of stop() (the correct
  # spelling is `call.`) and so was pasted into the message as "FALSE".
  if (!is.null(teams_requested)) {
    n_teams <- suppressWarnings(as.numeric(teams_requested))
    if (is.na(n_teams) || n_teams < 0) {
      stop('teams_requested must be positive', call. = FALSE)
    }
  }

  # Query-string arguments forwarded to the Steam Web API request.
  args <- list(start_at_team_id = start_at_team_id,
               teams_requested = teams_requested,
               key = key,
               language = language)

  # Perform the GET request against the GetTeamInfoByTeamID endpoint.
  dota_result <- get_response(dota_id, 'GetTeamInfoByTeamID', 'IDOTA2Match', 1, args)

  # Drop the outer list level and its first element so that `content`
  # holds the list of teams directly (per original: unnecessary levels).
  dota_result$content <- dota_result$content[[1]][-1]

  dota_result
}
| /R/get_team_info_by_team_id.R | no_license | cran/RDota2 | R | false | false | 3,264 | r | #' Team Information
#'
#' Team information from team id.
#'
#' A list will be returned that contains three elements. The content, the url and the response
#' received from the api.
#'
#' The content element of the list contains the teams list of which each element is a team. For each
#' team different information is provided. Usually the following are included:
#'
#' \itemize{
#' \item \strong{name:} Team's name.
#' \item \strong{tag:} The team's tag.
#' \item \strong{time_created:} Unix timestamp of when the team was created.
#' \item \strong{calibration_games_remaining: :} Undocumented (possibly number of games until
#' a ranking score can be dedided).
#' \item \strong{logo:} The UGC id for the team logo.
#' \item \strong{logo_sponsor:} The UGC id for the team sponsor logo.
#' \item \strong{country_code:} The team's ISO 3166-1 country-code.
#' \item \strong{url:} Team's url which they provided.
#' \item \strong{games_played:} Number of games played.
#' \item \strong{player_*_account_id:} Player's account id. Will be as many columns as players.
#' \item \strong{admin_account_id:} Team's admin id.
#' \item \strong{league_id_*:} Undocumented (Probably leagues they participated in). Will be as
#' many columns as leagues.
#' \item \strong{series_type:} Series type.
#' \item \strong{league_series_id:} The league series id.
#' \item \strong{league_game_id:} The league game id.
#' \item \strong{stage_name:} The name of the stage.
#' \item \strong{league_tier:} League tier.
#' \item \strong{scoreboard:} A huge list containing scoreboard information.
#' }
#'
#' @param start_at_team_id (optional) Team id to start returning results from .
#'
#' @param teams_requested (optional) The number of teams to return.
#'
#' @return A dota_api object containing the elements described in the details.
#'
#' @inheritParams get_response
#' @inheritParams get_event_stats_for_account
#'
#' @section Steam API Documentation:
#' \url{https://wiki.teamfortress.com/wiki/WebAPI/GetTeamInfoByTeamID}
#'
#' @examples
#' \dontrun{
#' get_team_info_by_team_id()
#' get_team_info_by_team_id(teams_requested = 10)
#' get_team_info_by_team_id(language = 'en', key = NULL)
#' get_team_info_by_team_id(language = 'en', key = 'xxxxxxxxxxx')
#' }
#'
#' @export
get_team_info_by_team_id <- function(start_at_team_id = NULL,
                                     teams_requested = NULL,
                                     dota_id = 570,
                                     language = 'en',
                                     key = NULL) {

  # Validate teams_requested: it must coerce to a non-negative number.
  # Fixes two defects in the original check: the error message referred to
  # "matches_requested" (a different function's argument), and stop() was
  # given `.call = FALSE`, which is not an argument of stop() (the correct
  # spelling is `call.`) and so was pasted into the message as "FALSE".
  if (!is.null(teams_requested)) {
    n_teams <- suppressWarnings(as.numeric(teams_requested))
    if (is.na(n_teams) || n_teams < 0) {
      stop('teams_requested must be positive', call. = FALSE)
    }
  }

  # Query-string arguments forwarded to the Steam Web API request.
  args <- list(start_at_team_id = start_at_team_id,
               teams_requested = teams_requested,
               key = key,
               language = language)

  # Perform the GET request against the GetTeamInfoByTeamID endpoint.
  dota_result <- get_response(dota_id, 'GetTeamInfoByTeamID', 'IDOTA2Match', 1, args)

  # Drop the outer list level and its first element so that `content`
  # holds the list of teams directly (per original: unnecessary levels).
  dota_result$content <- dota_result$content[[1]][-1]

  dota_result
}
|
# Bootstrap-and-train script executed on the Cloud ML worker: installs the
# required R packages, restores a packrat snapshot if present, runs the
# training entrypoint via tfruns, then uploads the run directory to the
# configured storage bucket.
# required R packages
CRAN <- c("RCurl", "devtools", "readr")
GITHUB <- c(
  "tidyverse/purrr",
  "tidyverse/modelr",
  "rstudio/tensorflow",
  "rstudio/cloudml",
  "rstudio/keras",
  "rstudio/tfruns",
  "rstudio/tfestimators"
)
# save repository + download methods
repos <- getOption("repos")
download.file.method <- getOption("download.file.method")
download.file.extra <- getOption("download.file.extra")
# emit warnings as they occur
options(warn = 1)
# NOTE(review): on.exit() only registers a handler inside a function call;
# at the top level of a sourced script its effect is questionable -- confirm
# whether this file is expected to run inside a wrapping function.
on.exit(
  options(
    repos = repos,
    download.file.method = download.file.method,
    download.file.extra = download.file.extra
  ),
  add = TRUE
)
# set an appropriate downloader (prefer curl, then wget, else R's default)
if (nzchar(Sys.which("curl"))) {
  options(
    repos = c(CRAN = "https://cran.rstudio.com"),
    download.file.method = "curl",
    download.file.extra = "-L -f"
  )
} else if (nzchar(Sys.which("wget"))) {
  options(
    repos = c(CRAN = "https://cran.rstudio.com"),
    download.file.method = "wget",
    download.file.extra = NULL
  )
} else {
  options(repos = c(CRAN = "http://cran.rstudio.com"))
}
# source a file 'dependencies.R', if it exists
if (file.exists("dependencies.R"))
  source("dependencies.R")
# attempt to restore using a packrat lockfile
if (file.exists("packrat/packrat.lock")) {
  # ensure packrat is installed
  if (!"packrat" %in% rownames(installed.packages()))
    install.packages("packrat")
  # attempt a project restore
  packrat::restore()
  packrat::on()
}
# discover available R packages
installed <- rownames(installed.packages())
# install required CRAN packages (skipping any already installed)
for (pkg in CRAN) {
  if (pkg %in% installed)
    next
  install.packages(pkg)
}
# install required GitHub packages
# NOTE(review): presence is checked via the repo's basename, which assumes
# each package name matches its repository name -- confirm for new entries.
for (uri in GITHUB) {
  if (basename(uri) %in% installed)
    next
  devtools::install_github(uri)
}
# Training ----
library(cloudml)
# read deployment information
deploy <- readRDS("cloudml/deploy.rds")
# source entrypoint
run_dir <- tfruns::unique_run_dir()
tfruns::training_run(file = deploy$entrypoint,
                     context = deploy$environment,
                     flags = deploy$overlay,
                     encoding = "UTF-8",
                     echo = TRUE,
                     view = FALSE,
                     run_dir = run_dir)
# upload run directory to requested bucket (if any)
config <- yaml::yaml.load_file("cloudml.yml")
cloudml <- config$cloudml
storage <- cloudml[["storage-bucket"]]
if (is.character(storage)) {
  source <- run_dir
  target <- file.path(storage, run_dir)
  # gsutil() -- presumably resolves the gsutil binary path (from the
  # cloudml package attached above) -- TODO confirm
  system(paste(gsutil(), "cp", "-r", shQuote(source), shQuote(target)))
}
| /inst/cloudml/cloudml/deploy.R | no_license | Geoany/cloudml | R | false | false | 2,554 | r | # required R packages
CRAN <- c("RCurl", "devtools", "readr")
GITHUB <- c(
"tidyverse/purrr",
"tidyverse/modelr",
"rstudio/tensorflow",
"rstudio/cloudml",
"rstudio/keras",
"rstudio/tfruns",
"rstudio/tfestimators"
)
# Generated deployment script: configures CRAN download options, installs
# dependencies, runs a tfruns training run, and uploads the run directory.
# save repository + download methods so they can be restored on exit
repos <- getOption("repos")
download.file.method <- getOption("download.file.method")
download.file.extra <- getOption("download.file.extra")
# emit warnings as they occur
options(warn = 1)
# NOTE(review): on.exit() only registers a handler inside a function frame;
# confirm this script is executed in a context where it takes effect
on.exit(
  options(
    repos = repos,
    download.file.method = download.file.method,
    download.file.extra = download.file.extra
  ),
  add = TRUE
)
# set an appropriate downloader (prefer curl, then wget, else R's default)
if (nzchar(Sys.which("curl"))) {
  options(
    repos = c(CRAN = "https://cran.rstudio.com"),
    download.file.method = "curl",
    download.file.extra = "-L -f"
  )
} else if (nzchar(Sys.which("wget"))) {
  options(
    repos = c(CRAN = "https://cran.rstudio.com"),
    download.file.method = "wget",
    download.file.extra = NULL
  )
} else {
  # NOTE(review): this fallback uses plain http while the branches above
  # use https -- confirm that is intentional
  options(repos = c(CRAN = "http://cran.rstudio.com"))
}
# source a file 'dependencies.R', if it exists
if (file.exists("dependencies.R"))
  source("dependencies.R")
# attempt to restore using a packrat lockfile
if (file.exists("packrat/packrat.lock")) {
  # ensure packrat is installed
  if (!"packrat" %in% rownames(installed.packages()))
    install.packages("packrat")
  # attempt a project restore
  packrat::restore()
  packrat::on()
}
# discover available R packages
installed <- rownames(installed.packages())
# install required CRAN packages
# NOTE(review): CRAN and GITHUB are expected to be defined earlier in the
# generated script; they are not visible in this fragment
for (pkg in CRAN) {
  if (pkg %in% installed)
    next
  install.packages(pkg)
}
# install required GitHub packages (repo basename = package name)
for (uri in GITHUB) {
  if (basename(uri) %in% installed)
    next
  devtools::install_github(uri)
}
# Training ----
library(cloudml)
# read deployment information
deploy <- readRDS("cloudml/deploy.rds")
# source entrypoint in a fresh, uniquely named run directory
run_dir <- tfruns::unique_run_dir()
tfruns::training_run(file = deploy$entrypoint,
                     context = deploy$environment,
                     flags = deploy$overlay,
                     encoding = "UTF-8",
                     echo = TRUE,
                     view = FALSE,
                     run_dir = run_dir)
# upload run directory to requested bucket (if any)
config <- yaml::yaml.load_file("cloudml.yml")
cloudml <- config$cloudml
storage <- cloudml[["storage-bucket"]]
if (is.character(storage)) {
  source <- run_dir
  target <- file.path(storage, run_dir)
  system(paste(gsutil(), "cp", "-r", shQuote(source), shQuote(target)))
}
|
\name{searchBS}
\alias{searchBS}
\title{Brickset search GET request}
\usage{
searchBS(url = "http://brickset.com/webServices/brickset.asmx/",
apiKey = "", userHash = "", query = "", theme = "", subtheme = "",
setNumber = "", year = "", Owned = "", Wanted = "")
}
\arguments{
\item{url}{brickset webservices url}
\item{apiKey}{brickset apiKey (s. details)}
\item{userHash}{brickset userHash (s. details)}
\item{query}{A search string.}
\item{theme}{Valid name of a theme.}
\item{subtheme}{Valid name of a subtheme.}
\item{setNumber}{string or integer (s. details)}
\item{year}{string or integer}
\item{Owned}{Pass a '1' to get a list of set(s) the
specified user owns.}
\item{Wanted}{Pass a '1' to get a list of set(s) the
specified user wants.}
}
\value{
brickset XML response set
}
\description{
Perform brickset search request via http GET
}
\details{
If \code{setNumber} does not include a '\code{-#}' after
the number (e.g. 70006-1), the suffix \code{-1} is added
automatically
Without providing a valid \code{apiKey} a maximum of 20
results will be returned.
If \code{userHash} is provided, the returned data will
contain flags indicating whether the specified user owns
and/or wants the set.
}
| /package/man/searchBS.Rd | no_license | steffenWagner/LegoPricing | R | false | false | 1,241 | rd | \name{searchBS}
\alias{searchBS}
\title{Brickset search GET request}
\usage{
searchBS(url = "http://brickset.com/webServices/brickset.asmx/",
apiKey = "", userHash = "", query = "", theme = "", subtheme = "",
setNumber = "", year = "", Owned = "", Wanted = "")
}
\arguments{
\item{url}{brickset webservices url}
\item{apiKey}{brickset apiKey (s. details)}
\item{userHash}{brickset userHash (s. details)}
\item{query}{A search string.}
\item{theme}{Valid name of a theme.}
\item{subtheme}{Valid name of a subtheme.}
\item{setNumber}{string or integer (s. details)}
\item{year}{string or integer}
\item{Owned}{Pass a '1' to get a list of set(s) the
specified user owns.}
\item{Wanted}{Pass a '1' to get a list of set(s) the
specified user wants.}
}
\value{
brickset XML response set
}
\description{
Perform brickset search request via http GET
}
\details{
If \code{setNumber} does not include a '\code{-#}' after
the number (e.g. 70006-1), the suffix \code{-1} is added
automatically
Without providing a valid \code{apiKey} a maximum of 20
results will be returned.
If \code{userHash} is provided, the returned data will
contain flags indicating whether the specified user owns
and/or wants the set.
}
|
## These Functions are intended to cache matrix and then find the inverse
## after checking to see if there is cache value
## cache matrix inverse
# Wrap a matrix in a list of accessor closures so that a computed inverse
# can be cached alongside the data.
#
# Returned interface:
#   set(y)           -- replace the stored matrix and drop any cached value
#   get()            -- return the stored matrix
#   setmatrix(solve) -- store a computed inverse in the cache
#   getsolve()       -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # replace the data and invalidate the cache
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached <<- solve
  }
  getsolve <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getsolve = getsolve
  )
}
## Solve for Inverse with Cache
# Return the inverse of the matrix held in a makeCacheMatrix() wrapper,
# computing it with solve() only on a cache miss. Extra arguments in ...
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getsolve()
  if (is.null(inv)) {
    # cache miss: compute the inverse, store it, fall through to return
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)
  } else {
    message("getting cached data")
  }
  inv
}
## after checking to see if there is cache value
## cache matrix inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getsolve = getsolve)
}
## Solve for Inverse with Cache
cacheSolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setmatrix(m)
m
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rpdirichlet}
\alias{rpdirichlet}
\title{Random Samples from the Product-Dirichlet Distribution}
\usage{
rpdirichlet(n, alpha, options, drop_fixed = TRUE)
}
\arguments{
\item{n}{number of samples}
\item{alpha}{Dirichlet parameters concatenated across independent conditions
(e.g., a1,a2, b1,b2,b3)}
\item{options}{the number of choice options per item type, e.g., \code{c(2,3)}
for a binary and ternary condition.
The sum of \code{options} must be equal to the length of \code{alpha}.}
\item{drop_fixed}{whether the output matrix includes the last probability for each category
(which is not a free parameter since probabilities must sum to one).}
}
\description{
Random samples from the prior/posterior (i.e., product-Dirichlet) of the unconstrained
product-multinomial model (the encompassing model).
}
\examples{
# standard uniform Dirichlet
rpdirichlet(5, c(1,1,1,1), 4)
rpdirichlet(5, c(1,1,1,1), 4, drop_fixed = FALSE)
# two ternary outcomes: (a1,a2,a3, b1,b2,b3)
rpdirichlet(5, c(9,5,1, 3,6,6), c(3,3))
rpdirichlet(5, c(9,5,1, 3,6,6), c(3,3), drop_fixed = FALSE)
}
| /multinomineq/man/rpdirichlet.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 1,211 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rpdirichlet}
\alias{rpdirichlet}
\title{Random Samples from the Product-Dirichlet Distribution}
\usage{
rpdirichlet(n, alpha, options, drop_fixed = TRUE)
}
\arguments{
\item{n}{number of samples}
\item{alpha}{Dirichlet parameters concatenated across independent conditions
(e.g., a1,a2, b1,b2,b3)}
\item{options}{the number of choice options per item type, e.g., \code{c(2,3)}
for a binary and ternary condition.
The sum of \code{options} must be equal to the length of \code{alpha}.}
\item{drop_fixed}{whether the output matrix includes the last probability for each category
(which is not a free parameter since probabilities must sum to one).}
}
\description{
Random samples from the prior/posterior (i.e., product-Dirichlet) of the unconstrained
product-multinomial model (the encompassing model).
}
\examples{
# standard uniform Dirichlet
rpdirichlet(5, c(1,1,1,1), 4)
rpdirichlet(5, c(1,1,1,1), 4, drop_fixed = FALSE)
# two ternary outcomes: (a1,a2,a3, b1,b2,b3)
rpdirichlet(5, c(9,5,1, 3,6,6), c(3,3))
rpdirichlet(5, c(9,5,1, 3,6,6), c(3,3), drop_fixed = FALSE)
}
|
# Set the working directory
# NOTE(review): hard-coded absolute path; this script only runs on the
# original author's machine
setwd("C:/Users/familia CB/Desktop/Datos Control")
# Open the data file
file <- paste0(getwd(),"/Control2.csv")
data <- read.csv2("Control2.csv", head= TRUE, sep = ';')
str(data)
# 3.- a) Expected return for each company (mean of the return column)
RIGPA <- mean(data$RETORNO.IGPA)
RIGPA
RSMSAAM <- mean(data$RETORNO.SM.SAAM)
RSMSAAM
RSQM.B <- mean(data$RETORNO.SQM.B)
RSQM.B
RVAPORES <- mean(data$RETORNO.VAPORES)
RVAPORES
# b) Standard deviation of the returns of each company
DsIGPA <- sd(data$RETORNO.IGPA)
DsIGPA
DsSMSAAM <- sd(data$RETORNO.SM.SAAM)
DsSMSAAM
DsSQM.B <- sd(data$RETORNO.SQM.B)
DsSQM.B
DsVAPORES <-sd(data$RETORNO.VAPORES)
DsVAPORES
# c) Correlation matrix of the four return series
x <- data.frame(data$RETORNO.IGPA, data$RETORNO.SM.SAAM, data$RETORNO.SQM.B, data$RETORNO.VAPORES)
cor(x)
# 4.- Linear regression of each company's return on the IGPA index return
# SM.SAAM S.A.
regSMSAAM <- lm(data$RETORNO.SM.SAAM ~ data$RETORNO.IGPA, data=data)
regSMSAAM
# SQM-B
regSQM.B <- lm(data$RETORNO.SQM.B ~ data$RETORNO.IGPA, data=data)
regSQM.B
# VAPORES
regVAPORES <- lm(data$RETORNO.VAPORES ~ data$RETORNO.IGPA, data=data)
regVAPORES
setwd("C:/Users/familia CB/Desktop/Datos Control")
#Abrir archivo
file <- paste0(getwd(),"/Control2.csv")
data <- read.csv2("Control2.csv", head= TRUE, sep = ';')
str(data)
#3.- a)Retorno esperado para cada empresa
RIGPA <- mean(data$RETORNO.IGPA)
RIGPA
RSMSAAM <- mean(data$RETORNO.SM.SAAM)
RSMSAAM
RSQM.B <- mean(data$RETORNO.SQM.B)
RSQM.B
RVAPORES <- mean(data$RETORNO.VAPORES)
RVAPORES
#b)Desviacion estandar de los retornos de cada empresa
DsIGPA <- sd(data$RETORNO.IGPA)
DsIGPA
DsSMSAAM <- sd(data$RETORNO.SM.SAAM)
DsSMSAAM
DsSQM.B <- sd(data$RETORNO.SQM.B)
DsSQM.B
DsVAPORES <-sd(data$RETORNO.VAPORES)
DsVAPORES
#c)Matriz de correlacion
x <- data.frame(data$RETORNO.IGPA, data$RETORNO.SM.SAAM, data$RETORNO.SQM.B, data$RETORNO.VAPORES)
cor(x)
#4.- Regresion para cada empresa
#SM.SAAM.S.A.
regSMSAAM <- lm(data$RETORNO.SM.SAAM ~ data$RETORNO.IGPA, data=data)
regSMSAAM
#SQM-B
regSQM.B <- lm(data$RETORNO.SQM.B ~ data$RETORNO.IGPA, data=data)
regSQM.B
#VAPORES
regVAPORES <- lm(data$RETORNO.VAPORES ~ data$RETORNO.IGPA, data=data)
regVAPORES |
#' @name methods_localjc
#' @title Methods for class localjc
#' @description The \code{plot()} function allows the user to plot significant observations.
#' The \code{print()} function is used to print the number of runs in each localization. Additional information of
#' expected values and standard deviation, z-value ans p-value is prited for each observation.
#'
#' @param x a \code{localjc} object created by \code{\link{Q.test}}.
#' @param sig significant level for each observation in \code{plot()} method. Default \code{sig = 0.05}
#' @param sf optional argument for \code{plot()} method to include a sf object (default = NULL)
#' @param coor optional argument for \code{plot()} method to include coordinates of points (default = NULL)
#' @param ... further arguments passed to or from other methods.
#' @return No return value, called for side effects
#' @examples
#' # Example 1: Local spatial runs test based on knn
#' library(lwgeom)
#' N <- 100
#' cx <- runif(N)
#' cy <- runif(N)
#' x <- cbind(cx,cy)
#' listw <- spdep::knearneigh(cbind(cx,cy), k = 10)
#' p <- c(1/6,3/6,2/6)
#' rho <- 0.5
#' fx <- dgp.spq(p = p, listw = listw, rho = rho)
#'
#' # Asymtotic version
#' lsrq <- local.sp.runs.test(fx = fx, listw = listw, alternative = "less")
#' print(lsrq)
#' plot(lsrq, sig = 0.05)
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Antonio Páez \tab \email{paezha@@gmail.com} \cr
#' Manuel Ruiz \tab \email{manuel.ruiz@@upct.es} \cr
#' }
#' @references
#' \itemize{
#' \item Ruiz, M., López, F., and Páez, A. (2021).
#' A test for global and local homogeneity of categorical data based on spatial runs.
#' \emph{working paper}.
#' }
#'
NULL
#' @name print.localjc
#' @rdname methods_localjc
#' @export
print.localjc <- function(x, ...) {
  # S3 print method: validate the class, then show the per-observation
  # join-count table stored in the object.
  if (!inherits(x, "localjc")) {
    stop("Argument must be a localjc object")
  }
  print(x[["ljc"]])
  invisible(x)
}
#' @name plot.localjc
#' @rdname methods_localjc
#' @export
#'
#'
#'
plot.localjc <- function(x, ..., sf = NULL, coor = NULL, sig = 0.05){
  # S3 plot method: map observations whose local join-count pseudo p-value
  # falls below `sig`, drawn over either a supplied sf object or points
  # built from a coordinate matrix.
  if (!inherits(x, "localjc")) stop("Argument must be a localjc object")
  # if (x$sf == TRUE & is.null(sf)) stop("Include the sf object using the sf argument")
  ljc <- x
  # 1 = significant at level `sig`, 0 = not; NA propagates into the factor
  a <- as.factor((ljc$ljc$pseudo.value < sig)*1)
  #####################
  ### Plot JC Local
  #####################
  # NOTE(review): the disabled block below referenced `lsrq`, which is not
  # an argument of this function -- likely copied from plot.localsrq
  # if (is.null(sf)){
  #   if (is.null(coor) &&
  #       (inherits(lsrq$listw, "knn"))){
  #     coor <- as.data.frame(lsrq$listw$x)
  #   }
  #   if (!is.null(coor) &&
  #       (!inherits(lsrq$listw, "knn"))){
  #     coor <- as.data.frame(coor)
  #   }
  #   sf <- st_as_sf(coor,coords = names(coor))
  #   mysize = 4
  # }
  # build a point sf object from raw coordinates when no sf map is given
  if (is.null(sf)){
    if (!is.null(coor)){
      coor <- as.data.frame(coor)
      sf <- st_as_sf(coor,coords = names(coor))
      mysize = 4
    }
  }
  # marker/border size depends on the geometry type of the map
  # NOTE(review): if both `sf` and `coor` are NULL, `sf` and `mysize` are
  # never defined and the ggplot call below errors -- confirm callers
  # always supply one of them
  if (!is.null(sf)){
    if (inherits(st_geometry(sf),
                 "sfc_MULTIPOLYGON")) mysize = .2
    if (inherits(st_geometry(sf),
                 "sfc_POLYGON")) mysize = .2
    if (inherits(st_geometry(sf),
                 "sfc_POINT")) mysize = 4
  }
  # recode the 0/1/NA factor into human-readable legend levels
  sf$levels <- addNA(a)
  levels(sf$levels)[is.na(levels(sf$levels))] <- "NA"
  levels(sf$levels)[levels(sf$levels)=="0"] <- "non-sig"
  levels(sf$levels)[levels(sf$levels)=="1"] <- "sig"
  cols <- c("NA" = "white", "non-sig" = "grey77", "sig" = "red")
  # choropleth/point map: significant observations in red
  plot_jc <- ggplot(sf) +
    geom_sf(aes(fill = levels),
            color = "black", shape = 21,
            size = mysize) +
    theme_bw() +
    theme(axis.text.x = element_blank(),
          axis.text.y = element_blank()) +
    xlab(paste0("Significance p-value = ",
                sig)) +
    scale_fill_manual(values = cols,
                      na.value ="grey",
                      drop = FALSE)
  plot_jc
}
| /R/methods_localjc.R | permissive | f8l5h9/spqdep | R | false | false | 3,841 | r | #' @name methods_localjc
#' @title Methods for class localjc
#' @description The \code{plot()} function allows the user to plot significant observations.
#' The \code{print()} function is used to print the number of runs in each localization. Additional information of
#' expected values and standard deviation, z-value ans p-value is prited for each observation.
#'
#' @param x a \code{localjc} object created by \code{\link{Q.test}}.
#' @param sig significant level for each observation in \code{plot()} method. Default \code{sig = 0.05}
#' @param sf optional argument for \code{plot()} method to include a sf object (default = NULL)
#' @param coor optional argument for \code{plot()} method to include coordinates of points (default = NULL)
#' @param ... further arguments passed to or from other methods.
#' @return No return value, called for side effects
#' @examples
#' # Example 1: Local spatial runs test based on knn
#' library(lwgeom)
#' N <- 100
#' cx <- runif(N)
#' cy <- runif(N)
#' x <- cbind(cx,cy)
#' listw <- spdep::knearneigh(cbind(cx,cy), k = 10)
#' p <- c(1/6,3/6,2/6)
#' rho <- 0.5
#' fx <- dgp.spq(p = p, listw = listw, rho = rho)
#'
#' # Asymtotic version
#' lsrq <- local.sp.runs.test(fx = fx, listw = listw, alternative = "less")
#' print(lsrq)
#' plot(lsrq, sig = 0.05)
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Antonio Páez \tab \email{paezha@@gmail.com} \cr
#' Manuel Ruiz \tab \email{manuel.ruiz@@upct.es} \cr
#' }
#' @references
#' \itemize{
#' \item Ruiz, M., López, F., and Páez, A. (2021).
#' A test for global and local homogeneity of categorical data based on spatial runs.
#' \emph{working paper}.
#' }
#'
NULL
#' @name print.localjc
#' @rdname methods_localjc
#' @export
print.localjc <- function(x, ...) {
if (!inherits(x, "localjc")) stop("Argument must be a localjc object")
print(x$ljc)
invisible(x)
}
#' @name plot.localjc
#' @rdname methods_localjc
#' @export
#'
#'
#'
plot.localjc <- function(x, ..., sf = NULL, coor = NULL, sig = 0.05){
if (!inherits(x, "localjc")) stop("Argument must be a localjc object")
# if (x$sf == TRUE & is.null(sf)) stop("Include the sf object using the sf argument")
ljc <- x
a <- as.factor((ljc$ljc$pseudo.value < sig)*1)
#####################
### Plot JC Local
#####################
# if (is.null(sf)){
# if (is.null(coor) &&
# (inherits(lsrq$listw, "knn"))){
# coor <- as.data.frame(lsrq$listw$x)
# }
# if (!is.null(coor) &&
# (!inherits(lsrq$listw, "knn"))){
# coor <- as.data.frame(coor)
# }
# sf <- st_as_sf(coor,coords = names(coor))
# mysize = 4
# }
if (is.null(sf)){
if (!is.null(coor)){
coor <- as.data.frame(coor)
sf <- st_as_sf(coor,coords = names(coor))
mysize = 4
}
}
if (!is.null(sf)){
if (inherits(st_geometry(sf),
"sfc_MULTIPOLYGON")) mysize = .2
if (inherits(st_geometry(sf),
"sfc_POLYGON")) mysize = .2
if (inherits(st_geometry(sf),
"sfc_POINT")) mysize = 4
}
sf$levels <- addNA(a)
levels(sf$levels)[is.na(levels(sf$levels))] <- "NA"
levels(sf$levels)[levels(sf$levels)=="0"] <- "non-sig"
levels(sf$levels)[levels(sf$levels)=="1"] <- "sig"
cols <- c("NA" = "white", "non-sig" = "grey77", "sig" = "red")
plot_jc <- ggplot(sf) +
geom_sf(aes(fill = levels),
color = "black", shape = 21,
size = mysize) +
theme_bw() +
theme(axis.text.x = element_blank(),
axis.text.y = element_blank()) +
xlab(paste0("Significance p-value = ",
sig)) +
scale_fill_manual(values = cols,
na.value ="grey",
drop = FALSE)
plot_jc
}
|
## This script downloads a zip file from UC Irvine Machine Learning Repository,
## which contains household power consumption data from one household with a
## one-minute sampling rate over a period of 4 years.
## This script produces a .png file containing a histogram plot of the
## Global active power data in kilowatts for 1-Feb-2007 and 2-Feb-2007
## WARNINGS ##
## this script requires the use of the plyr package
## make sure all graphics devices are off (using dev.cur() and dev.off())
## library(plyr)
## check to see if directory exists, otherwise create it
if (!file.exists("data")) {
  dir.create("data")
}
## download UCI HAR dataset zip file
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## NOTE(review): on Windows, zip downloads usually need mode = "wb" -- confirm
download.file(fileUrl, destfile = "./data/HPC.zip")
dateDownloaded <- date()
## unzip the file to the same directory
unzip("./data/HPC.zip", exdir = "./data")
## infer colClasses from a small sample so the full read is faster
tab5rows <- read.table("./data/household_power_consumption.txt", header = TRUE,
                       sep = ";", nrows = 5)
classes <- sapply(tab5rows, class)
# housekeeping
rm(tab5rows)
## read full file using the inferred colClasses; "?" marks missing values
hpc <- read.table("./data/household_power_consumption.txt", header = TRUE,
                  sep = ";", na.strings = "?", colClasses = classes,
                  comment.char = "")
## create a data set for Feb 1 and Feb 2 2007 with single field for date-time
hpc$Date <- as.Date(as.character(hpc$Date), "%d/%m/%Y")
hpcdata <- hpc[ which(hpc$Date == "2007-02-01" | hpc$Date == "2007-02-02"), ]
hpcdata <- transform(hpcdata, DateTime =
                       as.POSIXlt(paste(as.character(hpcdata$Date),
                                        as.character(hpcdata$Time))))
hpcdata <- hpcdata[ , c(10, 3:9)]
row.names(hpcdata) <- NULL # remove unneeded row names
# housekeeping - remove unneeded data
rm(hpc)
rm(classes)
## reset margins
par(mar = c(5,4,4,2))
## open png file
png("plot1.png", width = 480, height = 480, units = "px")
## Construct Plot 1: histogram of global active power (kilowatts)
hist(hpcdata$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off() # close PNG device
| /plot1.R | no_license | kbupright/ExData_Plotting1 | R | false | false | 2,201 | r | ## This script downloads a zip file from Uc Irvine Machine Learning Repository,
## which contains household power consumption data from one household with a
## one-minute sampling rate over a period of 4 years.
## This script produces a .png file containing a histogram plot of the
## Global active power data in kilowatts for 1-Feb-2007 and 2-Feb-2007
## WARNINGS ##
## this script requires the use of the plyr package
## make sure all graphics devices are off (using dev.cur() and dev.off())
## library(plyr)
## check to see if directory exists, otherwise create it
if (!file.exists("data")) {
dir.create("data")
}
## download UCI HAR dataset zip file
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/HPC.zip")
dateDownloaded <- date()
## unzip the file to the same directory
unzip("./data/HPC.zip", exdir = "./data")
## set colClasses
tab5rows <- read.table("./data/household_power_consumption.txt", header = TRUE,
sep = ";", nrows = 5)
classes <- sapply(tab5rows, class)
# housekeeping
rm(tab5rows)
## read file using ColClasses
hpc <- read.table("./data/household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?", colClasses = classes,
comment.char = "")
## create a data set for Feb 1 and Feb 2 2007 with single field for date-time
hpc$Date <- as.Date(as.character(hpc$Date), "%d/%m/%Y")
hpcdata <- hpc[ which(hpc$Date == "2007-02-01" | hpc$Date == "2007-02-02"), ]
hpcdata <- transform(hpcdata, DateTime =
as.POSIXlt(paste(as.character(hpcdata$Date),
as.character(hpcdata$Time))))
hpcdata <- hpcdata[ , c(10, 3:9)]
row.names(hpcdata) <- NULL # remove unneeded row names
# housekeeping - remove unneeded data
rm(hpc)
rm(classes)
## reset margins
par(mar = c(5,4,4,2))
## open png file
png("plot1.png", width = 480, height = 480, units = "px")
## Construct Plot 1
hist(hpcdata$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off() # close PNG device
|
#
# Function to "curve" a threshold due to spillover
#
# NOTE: This function operates in linear coordinates.
#
# pname: the name of the parameter of interest
# pvalue: the value of the given parameter to be tested (is it above or below the curved threshold?)
# threshold.zero: the initial threshold value, to be adjusted by adding the spillover spread
# spill.matrix: the spillover matrix
# fac: a factor by which to scale the spillover spread
# NOTE(review): stub only -- the body is not yet implemented, so the function
# currently returns NULL for every input. Per the header comment above, it is
# intended to return threshold.zero adjusted upward by `fac` times the
# spillover spread implied by spill.matrix for parameter `pname`, evaluated
# at `pvalue` in linear coordinates.
spill.threshold = function (pname, pvalue, threshold.zero, spill.matrix, fac=3) {
}
# Function to "curve" a threshold due to spillover
#
# NOTE: This function operates in linear coordinates.
#
# pname: the name of the parameter of interest
# pvalue: the value of the given parameter to be tested (is it above or below the curved threshold?)
# threshold.zero: the initial threshold value, to be adjusted by adding the spillover spread
# spill.matrix: the spillover matrix
# fac: a factor by which to scale the spillover spread
spill.threshold = function (pname, pvalue, threshold.zero, spill.matrix, fac=3) {
} |
# Title: Data exploration
# Author: JAG
# Date: 21-Aug-20
# Notes
# Zach - something weird is happening with China (it appears twice) and even when adjusting for that,
# it seems like the percent of population there is too high
#____________________________________________________________________________________________________#
# Load pacakges
#____________________________________________________________________________________________________#
library(tidyverse)
library(ggplot2)
library(countrycode)
library(ineq)
library(REAT)
#____________________________________________________________________________________________________#
# Load data
#____________________________________________________________________________________________________#
df <- read.csv("all_national_indicators.csv")
df <- df %>%
filter(!is.na(iso3c)) # FIX IT: Ask Zach about NAs in ISO column
pop <- read.csv("FAOSTAT_population_data_8-21-2020.csv")
#____________________________________________________________________________________________________#
# Filter population and join to df
#____________________________________________________________________________________________________#
pop$iso3c <- countrycode(pop$Area, origin = "country.name", destination = "iso3c")
# FIX IT: Check unmatched countries with country list in df
pop_06_16 <- pop %>%
filter(Year %in% c(2006:2016)) %>%
group_by(iso3c) %>%
summarise(pop_06_16 = 1000*sum(Value)/length(c(2006:2016)))
pop_14_16 <- pop %>%
filter(Year %in% c(2014:2016)) %>%
group_by(iso3c) %>%
summarise(pop_14_16 = 1000*sum(Value)/length(c(2014:2016)))
df <- df %>%
select(iso3c, fish_supply_daily_g_protein_percap, mean_exports_USD1000, mean_exports_tonnes,
mean_capture_production, mean_aquaculture_production_freshwater, mean_aquaculture_production_brackish,
mean_aquaculture_production_marine) %>%
left_join(pop_06_16, by = "iso3c") %>%
left_join(pop_14_16, by = "iso3c")
# Replace NA's with zeros in appropriate columns (need to check this carefully--> currently just doing everywhere)
df[is.na(df)] <- 0
# Convert consumption to total
df$total_supply_gprotein <- df$fish_supply_daily_g_protein_percap*df$pop_14_16
#____________________________________________________________________________________________________#
# Plot Lorenz curves
#____________________________________________________________________________________________________#
# Plot a histogram of per-capita benefit and a population-weighted Lorenz
# curve (with Gini coefficient) for one benefit column of a country-level
# data frame.
#
# plot.df: data frame with an iso3c column plus the benefit and population
#          columns named below.
# plot.col: name (string) of the benefit column to analyse.
# pop.col:  name (string) of the population column used as weights.
#
# Called for its base-graphics side effects; countries with zero/NA
# population are dropped before plotting.
plot.lc <- function(plot.df, plot.col, pop.col){
  # BUG FIX: the original body began with `plot.df <- df`, which silently
  # discarded the plot.df argument and always plotted the global `df`.
  colnames(plot.df)[colnames(plot.df) == plot.col] <- "value"
  colnames(plot.df)[colnames(plot.df) == pop.col] <- "pop"
  plot.df <- plot.df %>%
    select(iso3c, value, pop) %>%
    group_by(iso3c) %>%
    summarise(value = mean(value), pop = mean(pop)) %>%
    mutate(value_percap = value/pop) %>%
    filter(pop > 0)
  # top panel: distribution of per-capita benefit
  par(mfrow=c(2,1))
  hist(plot.df$value_percap, breaks = 25, main = paste(plot.col, "\n Histogram"),
       xlab = "Benefit per capita")
  # bottom panel: population-weighted Lorenz curve with Gini in the title
  gini(plot.df$value_percap, weighting=plot.df$pop, lc = TRUE,
       lctitle = paste("Lorenz Curve, Gini =", round(gini(plot.df$value_percap, weighting=plot.df$pop), 2)),
       lcx = "% of population", lcy = "% of benefit",
       le.col = "black", lc.col = "black",
       lsize = 1, ltype = "solid",
       bg.col = "white", bgrid = FALSE)
}
# Prodcution
plot.lc(plot.df = df, plot.col = "mean_capture_production", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_freshwater", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_brackish", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_marine", pop.col = "pop_06_16")
# Distribution
plot.lc(plot.df = df, plot.col = "mean_exports_USD1000", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_exports_tonnes", pop.col = "pop_06_16")
# Consumption
plot.lc(plot.df = df, plot.col = "total_supply_gprotein", pop.col = "pop_14_16")
| /gini_analysis/explore_data.R | no_license | shinnakayama/aquatic_food_justice_model | R | false | false | 3,975 | r | # Title: Data exploration
# Author: JAG
# Date: 21-Aug-20
# Notes
# Zach - something weird is happening with China (it appears twice) and even when adjusting for that,
# it seems like the percent of population there is too high
#____________________________________________________________________________________________________#
# Load pacakges
#____________________________________________________________________________________________________#
library(tidyverse)
library(ggplot2)
library(countrycode)
library(ineq)
library(REAT)
#____________________________________________________________________________________________________#
# Load data
#____________________________________________________________________________________________________#
df <- read.csv("all_national_indicators.csv")
df <- df %>%
filter(!is.na(iso3c)) # FIX IT: Ask Zach about NAs in ISO column
pop <- read.csv("FAOSTAT_population_data_8-21-2020.csv")
#____________________________________________________________________________________________________#
# Filter population and join to df
#____________________________________________________________________________________________________#
pop$iso3c <- countrycode(pop$Area, origin = "country.name", destination = "iso3c")
# FIX IT: Check unmatched countries with country list in df
pop_06_16 <- pop %>%
filter(Year %in% c(2006:2016)) %>%
group_by(iso3c) %>%
summarise(pop_06_16 = 1000*sum(Value)/length(c(2006:2016)))
pop_14_16 <- pop %>%
filter(Year %in% c(2014:2016)) %>%
group_by(iso3c) %>%
summarise(pop_14_16 = 1000*sum(Value)/length(c(2014:2016)))
df <- df %>%
select(iso3c, fish_supply_daily_g_protein_percap, mean_exports_USD1000, mean_exports_tonnes,
mean_capture_production, mean_aquaculture_production_freshwater, mean_aquaculture_production_brackish,
mean_aquaculture_production_marine) %>%
left_join(pop_06_16, by = "iso3c") %>%
left_join(pop_14_16, by = "iso3c")
# Replace NA's with zeros in appropriate columns (need to check this carefully--> currently just doing everywhere)
df[is.na(df)] <- 0
# Convert consumption to total
df$total_supply_gprotein <- df$fish_supply_daily_g_protein_percap*df$pop_14_16
#____________________________________________________________________________________________________#
# Plot Lorenz curves
#____________________________________________________________________________________________________#
plot.lc <- function(plot.df, plot.col, pop.col){
plot.df <- df
colnames(plot.df)[colnames(plot.df) == plot.col] <- "value"
colnames(plot.df)[colnames(plot.df) == pop.col] <- "pop"
plot.df <- plot.df %>%
select(iso3c, value, pop) %>%
group_by(iso3c) %>%
summarise(value = mean(value), pop = mean(pop)) %>%
mutate(value_percap = value/pop) %>%
filter(pop >0)
par(mfrow=c(2,1))
hist(plot.df$value_percap, breaks = 25, main = paste(plot.col, "\n Histogram"),
xlab = "Benefit per capita")
gini(plot.df$value_percap, weighting=plot.df$pop, lc = TRUE,
lctitle = paste("Lorenz Curve, Gini =", round(gini(plot.df$value_percap, weighting=plot.df$pop), 2)),
lcx = "% of population", lcy = "% of benefit",
le.col = "black", lc.col = "black",
lsize = 1, ltype = "solid",
bg.col = "white", bgrid = FALSE)
}
# Prodcution
plot.lc(plot.df = df, plot.col = "mean_capture_production", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_freshwater", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_brackish", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_aquaculture_production_marine", pop.col = "pop_06_16")
# Distribution
plot.lc(plot.df = df, plot.col = "mean_exports_USD1000", pop.col = "pop_06_16")
plot.lc(plot.df = df, plot.col = "mean_exports_tonnes", pop.col = "pop_06_16")
# Consumption
plot.lc(plot.df = df, plot.col = "total_supply_gprotein", pop.col = "pop_14_16")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prefecture-city.R
\name{get_cities_map}
\alias{get_cities_map}
\title{get_cities_map}
\usage{
get_cities_map(adcode)
}
\arguments{
\item{adcode}{a numeric or a character of length one, the adcode of a province.}
}
\value{
a sf object, the boundary of prefecture cities for a province.
}
\description{
get_cities_map
}
| /man/get_cities_map.Rd | no_license | swcyo/alimap | R | false | true | 396 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prefecture-city.R
\name{get_cities_map}
\alias{get_cities_map}
\title{get_cities_map}
\usage{
get_cities_map(adcode)
}
\arguments{
\item{adcode}{a numeric or a character of length one, the adcode of a province.}
}
\value{
a sf object, the boundary of prefecture cities for a province.
}
\description{
get_cities_map
}
|
# Scrape an archived (web.archive.org) snapshot of the Cult Pens pencil
# page and extract pencil names and prices.
#
# address: URL of an archived version of the pencil page, containing a
#          timestamp of the form 20YYMMDDhhmmss.
# Returns a data.frame with columns name, price (e.g. "£1.99"), and date
# (8-digit snapshot date), or a zero-row data.frame if names and prices
# cannot be aligned.
#
# Example:
#   scrapearchive("https://web.archive.org/web/20140417064443/http://www.cultpens.com/acatalog/Pencils.html")
scrapearchive <- function(address) {
  library(rvest)
  # snapshot date (first 8 timestamp digits); time of day is ignored as
  # prices are unlikely to change within a day
  archivedate <- regmatches(address, gregexpr("20[0-9]{6}", address))
  # read_html() replaces the deprecated rvest::html()
  pencils <- read_html(address)
  # pencil product names live in <p><a> elements
  pnames <-
    pencils %>%
    html_nodes("p a") %>%
    html_text()
  # page formatting yields empty entries; keep only lines with letters
  pnames <- grep("[a-z]", pnames, value = TRUE)
  paragraphs <-
    pencils %>%
    html_nodes("p") %>%
    html_text()
  # keep only paragraphs that contain a price
  paragraphs <- grep("£", paragraphs, value = TRUE)
  # extract all prices; the decimal point is now escaped (the original
  # pattern's bare "." matched any character)
  t1 <- regmatches(paragraphs, gregexpr("£[0-9]+\\.[0-9][0-9]", paragraphs))
  # keep only the first price found in each paragraph
  price <- do.call("rbind", lapply(t1, "[[", 1))
  # combine; fall back to an empty frame if the vectors do not line up
  tryCatch({
    data.frame(name = pnames, price = price, date = unlist(archivedate))
  }, error = function(e) data.frame(name = character(0),
                                    price = numeric(0),
                                    date = character(0)))
}
# Scrape one Wayback Machine snapshot of the Cult Pens pencil catalogue
# page and extract product names and prices.
#
# address: full web.archive.org snapshot URL, e.g.
#   scrapearchive("https://web.archive.org/web/20140417064443/http://www.cultpens.com/acatalog/Pencils.html")
#
# Returns a data frame with columns name, price (strings like "£1.99")
# and date (snapshot date, yyyymmdd). If the scraped name and price
# vectors do not line up (page layout changed between snapshots), an
# empty data frame is returned instead.
library(rvest)
# Snapshot date (yyyymmdd) from the URL; the time-of-day part is ignored
# since a price is unlikely to change within a single day.
archivedate <- regmatches(address, gregexpr("20[0-9]{6}", address))
# NOTE(review): html() is deprecated in newer rvest; read_html() is the
# modern replacement.
pencils <- html(address)
# Product names are the link texts inside paragraphs
pnames <-
  pencils %>%
  html_nodes("p a") %>%
  html_text()
# Page formatting produces empty strings; keep only entries containing letters
pnames <- grep("[a-z]", pnames, value = TRUE)
# Paragraph texts carry the prices
paragraphs <-
  pencils %>%
  html_nodes("p") %>%
  html_text()
# Drop paragraphs without a pound sign
paragraphs <- grep("£", paragraphs, value = TRUE)
# Extract prices of the form £<digits>.<two digits>. The decimal point is
# escaped here: the previous pattern used a bare "." which matched any
# character, so strings such as "£1,23" were wrongly accepted as prices.
t1 <- regmatches(paragraphs, gregexpr("£[0-9]+\\.[0-9]{2}", paragraphs))
# Keep only the first price found in each paragraph
price <- do.call("rbind", lapply(t1, "[[", 1))
# Assemble the result; fall back to an empty frame when the vectors
# cannot be combined (e.g. differing lengths after a layout change)
tryCatch({
  data.frame(name = pnames, price = price, date = unlist(archivedate))
}, error = function(e) data.frame(name = character(0),
                                  price = numeric(0),
                                  date = character(0)))
}
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11260
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11259
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11259
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt44_40_387.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3326
c no.of clauses 11260
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11259
c
c QBFLIB/Basler/terminator/stmt44_40_387.qdimacs 3326 11260 E1 [1] 0 269 3056 11259 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt44_40_387/stmt44_40_387.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 718 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11260
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11259
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11259
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt44_40_387.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3326
c no.of clauses 11260
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11259
c
c QBFLIB/Basler/terminator/stmt44_40_387.qdimacs 3326 11260 E1 [1] 0 269 3056 11259 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_quality_stats.R
\name{modelqualstats}
\alias{modelqualstats}
\title{Compute Model Quality Information}
\usage{
modelqualstats(fit, holdoutXocc, holdoutyXobs, ModelSite, cl)
}
\arguments{
\item{fit}{a model fitted using run.detectionoccupancy}
\item{holdoutXocc}{A data frame of occupancy predictors for sites.}
\item{holdoutyXobs}{A data frame of detection information and species detections}
\item{ModelSite}{Column names used to match the holdoutyXobs to sites in holdoutXocc}
\item{cl}{A cluster created by parallel::makeCluster}
}
\description{
Compute Model Quality Information
}
| /man/modelqualstats.Rd | no_license | sustainablefarms/linking-data | R | false | true | 673 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_quality_stats.R
\name{modelqualstats}
\alias{modelqualstats}
\title{Compute Model Quality Information}
\usage{
modelqualstats(fit, holdoutXocc, holdoutyXobs, ModelSite, cl)
}
\arguments{
\item{fit}{a model fitted using run.detectionoccupancy}
\item{holdoutXocc}{A data frame of occupancy predictors for sites.}
\item{holdoutyXobs}{A data frame of detection information and species detections}
\item{ModelSite}{Column names used to match the holdoutyXobs to sites in holdoutXocc}
\item{cl}{A cluster created by parallel::makeCluster}
}
\description{
Compute Model Quality Information
}
|
# Script for processing results from final model simulations
library(rstanarm)
library(xtable)
library(ggplot2)
library(reshape2)
library(plyr)
theme_set(theme_bw())
# Helper function for conversion from df to matrix
# Convert a data frame whose first column holds sample identifiers into a
# plain matrix: the identifier column becomes the rownames and the
# remaining columns form the matrix body.
matrix.plz <- function(x) {
  ids <- as.character(x[, 1])
  values <- as.matrix(x[, -1])
  rownames(values) <- ids
  values
}
# Input, params and output.
# Two run modes: inside an Anduril workflow (where `param1`, `param2`,
# `document.dir`, `cf` and `table1`..`table4` are expected to be provided
# by the workflow engine -- they are only referenced, never defined, here)
# or standalone with hard-coded paths under the author's home directory.
if(exists("param1")) { # Anduril
  RESULTDIR <- param1
  thresh <- param2 # e.g. U0.2_a0.9
  PLOTDIR <- document.dir
  LATEX.CSV <- get.output(cf,'optOut1')
  COEFS.CSV <- get.output(cf,'optOut2')
  cv.chosen.vars <- table1
  prot <- table2
  gene <- table3
  mirna <- table4
  rm(optOut1)
  rm(optOut2)
} else { # non-Anduril
  # Standalone mode: normalized expression tables are read from $WRKDIR
  RESULTDIR <- "/home/viljami/wrk/finalmodel_results"
  PLOTDIR <- "/home/viljami/wrk/finalmodel_results/plots"
  OUTFILE <- "/home/viljami/wrk/finalmodel_results/foo.csv"
  LATEX.CSV <- "/home/viljami/wrk/finalmodel_results/ltable.csv"
  COEFS.CSV <- "/home/viljami/wrk/finalmodel_results/coefs.csv"
  WRKDIR <- Sys.getenv("WRKDIR")
  cv.chosen.vars <- read.delim(file.path(WRKDIR,"dippa-data","n_chosen_variables.csv"))
  prot <- read.delim(file.path(WRKDIR,"dippa-data","protein_normalized.csv"))
  gene <- read.delim(file.path(WRKDIR,"dippa-data","gene_normalized.csv"))
  mirna <- read.delim(file.path(WRKDIR,"dippa-data","mirna_normalized.csv"))
  if(!dir.exists(PLOTDIR)) dir.create(PLOTDIR)
  # Default variable-selection threshold column, format U<gamma>_a<alpha>
  thresh <- "U0.2_a0.5"
}
# Significant digits used when formatting numbers for the LaTeX table
SIGDIGS <- 3
# Sanity checking: the chosen threshold must name a column of the CV table
if(!(thresh %in% colnames(cv.chosen.vars)))
  stop(sprintf("Threshold '%s' not available!", thresh))
# Convert expression data into matrices; gene and miRNA rows are aligned
# to the sample order of the protein data (first column = sample ids)
samples <- as.character(prot[,1])
prot <- matrix.plz(prot)[samples,]
gene <- matrix.plz(gene)[samples,]
mirna <- matrix.plz(mirna)[samples,]
# Get result file names.
# list.files() interprets `pattern` as a regular expression, not a shell
# glob: the previous pattern "*.rda" was a malformed regex that matched
# loosely (e.g. names like "foo.rda.bak"). Anchor it to files that end in
# ".rda".
resfiles <- list.files(RESULTDIR, pattern = "\\.rda$")
# Get gene names from filenames (dot escaped so it only matches ".rda")
genes <- sub("finalmodel-\\d+-(\\w+)\\.rda", "\\1", resfiles)
# Sort into alphabetical order by gene
gord <- order(genes)
genes <- genes[gord]
resfiles <- resfiles[gord]
rm(gord)
# Result table: one row per gene result file. Character columns stay
# characters (stringsAsFactors=F) because they are overwritten cell by
# cell inside the loop below.
d <- data.frame(gene=genes, R2_gene=0, R2adj_gene=0,
                R2_full=NA, R2adj_full=NA, full_model_found="",
                gene_only_significant="", gene_full_significant="",
                n_miRNAs=0, chosen_miRNAs="",
                n_significant_miRNAs=0, significant_miRNAs="", significant_miRNA_inds="", stringsAsFactors=F)
# Per-gene coefficient frames are accumulated in a list and rbind-ed
# together after the loop
d.coefs <- list()
# Main loop: one iteration per fitted-model .rda file. Each load() is
# expected to bring at least `fit.gene`, `r2.gene`, `spath` and `params`
# into the environment -- all four are referenced below without being
# defined in this script.
# NOTE(review): 1:length(resfiles) iterates over c(1, 0) when resfiles is
# empty; seq_along(resfiles) would be safer.
for (i in 1:length(resfiles)) {
#for (i in 1:2) {
  f <- resfiles[i]
  g <- genes[i]
  # Load CV results for current gene
  load(file.path(RESULTDIR,f))
  # Get number of covariates chosen in CV (includes gene!)
  n_vars <- cv.chosen.vars[match(g, cv.chosen.vars$gene), thresh]
  # Guard against inconsistent bookkeeping: the CV table cannot claim
  # more chosen variables than the search allowed; treat that as missing.
  if(!is.na(n_vars) && n_vars > params$n_vars) {
    warning(sprintf("%s : n_vars too big (%d > %d)!",g, n_vars, params$n_vars))
    n_vars <- NA
  }
  # NB: when n_vars is NA, max(NA - 1, 0) is NA, so n_miRNAs stays NA;
  # full_model_found records that case explicitly on the next line.
  d[i,"n_miRNAs"] <- max(n_vars - 1, 0)
  d[i,"full_model_found"] <- ifelse(!is.na(n_vars), "yes", "no")
  # Gene only model results
  d[i,"R2_gene"] <- r2.gene["R2"]
  d[i,"R2adj_gene"] <- r2.gene["R2.adjusted"]
  # Gene coefficient "G" is deemed significant when its 95% posterior
  # interval excludes zero (both endpoints share the same sign)
  postsum.gene <- posterior_interval(fit.gene, prob = 0.95)
  signifs <- sign(postsum.gene["G",1]) == sign(postsum.gene["G",2])
  d[i,"gene_only_significant"] <- ifelse(signifs, "yes", "no")
  # Full model results
  if(!is.na(n_vars)) {
    if(n_vars > 1) {
      # Get names of chosen mirnas
      mirna_vars <- spath$chosen[3:(n_vars+1)]-2 #1 is constant, 2 is gene
      mirna_names <- colnames(mirna)[mirna_vars]
      # Undo R's name mangling: trailing "." back to "*", inner "." to "-"
      mirna_names <- gsub("\\.", "-", sub("\\.$", "\\*", mirna_names))
      d[i,"chosen_miRNAs"] <- paste(mirna_names, collapse=",")
      x <- cbind(1, gene[,g], mirna[,mirna_vars])
    } else {
      # no miRNAs chosen
      mirna_names <- c()
      x <- cbind(1, gene[,g])
    }
    # Projected w's weren't computed for w0+g-models!
    # So R2's can't be computed (instead NA).
    # If that is corrected, then remove this if (and else)!
    if(n_vars > 1) {
      # Create regression matrices
      x <- x[,1:(n_vars+1)] # drops gene if only constant selected
      y <- prot[,g]
      n <- length(y)
      # Get projected weights for chosen mirnas
      w <- as.matrix(spath$w[[n_vars+1]][spath$chosen[1:(n_vars+1)],])
      # Compute predicted prot expr (one column per posterior draw)
      ypred <- x %*% w
      # Compute R2 and adjusted R2
      R2 <- 1 - colSums((y-ypred)^2)/sum((y-mean(y))^2)
      R2.adj <- 1 - (1-R2)*(n-1)/(n-n_vars+1)
      resid <- y-ypred
      # R2.var / R2.var.adj are computed but not used later in this script
      R2.var <- 1-apply(resid,2,var)/var(y)
      R2.var.adj <- 1 - (1-R2.var)*(n-1)/(n-n_vars+1)
      d[i,"R2_full"] <- median(R2)
      d[i,"R2adj_full"] <- median(R2.adj)
      # Posterior summary for projected weights
      postsum <- t(apply(w, 1, quantile, probs=c(0.025,0.1,0.25,0.5,0.75,0.9,0.975)))
      rownames(postsum) <- c("w0", "gene", mirna_names)[1:(n_vars+1)]
      sdev <- as.vector(apply(w, 1, sd))
      # weight = posterior mass on the same side of zero as the median
      wgt <- as.vector(apply(w, 1, function(x) {ifelse(median(x)>0, sum(x>0)/length(x), sum(x<0)/length(x))}))
      signifs <- sign(postsum[,"2.5%"]) == sign(postsum[,"97.5%"])
      cfs <- data.frame(gene=g, variable=rownames(postsum), median=postsum[,"50%"], IQR=postsum[,"75%"]-postsum[,"25%"], sd=sdev, significant=ifelse(signifs, "yes", "no"), weight=wgt)
      d.coefs <- c(d.coefs, list(cfs))
      if(n_vars > 0) {
        d[i,"gene_full_significant"] <- ifelse(signifs["gene"], "yes", "no")
        if(n_vars > 1) {
          # Entries 1 and 2 of signifs are w0 and gene; the rest are miRNAs
          d[i,"n_significant_miRNAs"] <- sum(signifs[3:length(signifs)])
          d[i,"significant_miRNAs"] <- paste(grep("miR",rownames(postsum)[signifs], value=T), collapse=",")
          d[i,"significant_miRNA_inds"] <- paste(which(signifs[3:length(signifs)]), collapse=",")
        }
      }
    } else {
      d[i,"R2_full"] <- NA
      d[i,"R2adj_full"] <- NA
    }
  }
}
# Output
# table.out is the main result table; standalone mode also writes it to
# OUTFILE (in Anduril mode the variable itself is presumably the output
# table -- verify against the workflow definition).
table.out <- d
if(!exists("param1")) { # non-Anduril
  write.table(table.out, file=OUTFILE, sep="\t", row.names=F)
}
# Collapse the per-gene coefficient frames into a single table
d.coefs <- do.call(rbind, d.coefs)
write.table(d.coefs, file=COEFS.CSV, sep="\t", row.names=F)
# Make a version of the table for latex in Anduril
numf <- paste0("%.",SIGDIGS,"f")
dlat <- d[,c("gene","R2_gene","R2_full","n_miRNAs","significant_miRNAs")]
# Combine n_miRNAs and n_significant_miRNAs into "total (significant)"
nonNA <- !is.na(dlat$n_miRNAs) & dlat$n_miRNAs > 0
dlat$n_miRNAs[nonNA] <- paste0(d$n_miRNAs, " (", d$n_significant_miRNAs, ")")[nonNA]
# Mark significant gene coefficients with a LaTeX asterisk superscript
gene_only_sigs <- revalue(d$gene_only_significant, c(yes="$^{\\ast}$", no=""))
gene_full_sigs <- revalue(d$gene_full_significant, c(yes="$^{\\ast}$", no=""))
# Format R2 columns as "R2 (adjusted R2)" with SIGDIGS decimals
dlat$R2_gene <- paste0(sprintf(numf, d$R2_gene), " (", sprintf(numf, d$R2adj_gene), ")", gene_only_sigs)
dlat$R2_full[nonNA] <- paste0(sprintf(numf, d$R2_full), " (", sprintf(numf, d$R2adj_full), ")", gene_full_sigs)[nonNA]
write.table(dlat, file=LATEX.CSV, sep="\t", row.names=F)
# Make a plot of var num vs R2 and num signif (disabled)
# #dm <- subset(d, select=c(gene, n_miRNAs, n_significant_miRNAs, R2_full, R2_full_adj))
# dm <- subset(d, select=c(gene, n_miRNAs, n_significant_miRNAs, R2_full))
# dm$delta_R2adj <- d$R2adj_full - d$R2adj_gene
# dm <- melt(dm, id.vars=c("gene","n_miRNAs"))
# #dm$adjusted[dm$variable == "R2_full"] <- "no"
# #dm$adjusted[dm$variable == "R2_full_adj"] <- "yes"
# #dm$adjusted[dm$variable == "n_significant_miRNAs"] <- "NA"
# #dm$variable[dm$variable == "R2_full_adj"] <- "R2_full"
# # Pretty the variable names
# dm$variable <- revalue(dm$variable, c(
#   n_significant_miRNAs = "N~significant~miRNAs",
#   R2_full = "R[full]^2",
#   delta_R2adj = "Delta~bar(R)^2"
# ))
# g <- ggplot(dm, aes(x = n_miRNAs, y = value))#, color=adjusted, size=adjusted))
# g <- g + geom_point(alpha=0.6) + geom_line(stat="smooth", method="loess", alpha=0.3)
# g <- g + geom_ribbon(stat="smooth", method="loess", alpha=0.05)##, aes(color=NULL, group=adjusted))
# g <- g + facet_grid(variable ~ ., scales = "free", switch="y", labeller=label_parsed)
# #g <- g + scale_size_manual(values=c(1,1,0.5), guide=F)
# #g <- g + scale_color_discrete(name="R2 adjusted", breaks=c("no","yes"))
# #g <- g + ggtitle(parse(text=sub("U(.*)_a(.*)", "alpha:\\2~~~gamma:\\1", thresh)))
# g <- g + labs(x="N miRNAs", y=NULL)
# g <- g + theme(strip.text.y = element_text(size = 12))
# plot.file <- file.path(PLOTDIR, sprintf("n_miRNAs_R2s_%s.pdf", thresh))
# ggsave(plot.file, g, height=7, width=9, dpi=600)
# Make a latex table version in R (NOT USED)
dltx <- dlat
#colnames(dltx) <- c("Gene","$R^2_{gene}$","$R^2_{full}$","$\\bar{R}^2_{gene}$","$\\bar{R}^2_{full}$","$N_{miRNA}$","Chosen miRNAs")
#ltable <- xtable(dltx, align="llrrrrrp{5cm}", digits=SIGDIGS, caption="Properties of final regression models.")
colnames(dltx) <- c("Gene","$R^2_{gene}$","$R^2_{full}$","$N_{miRNA}$","Chosen miRNAs")
ltable <- xtable(dltx, align="llllrp{8cm}", digits=SIGDIGS, caption="Properties of final regression models.")
# Bold the column headers when the LaTeX table is rendered
janitor <- function(x){paste0('{\\textbf{', x,'}}')}
document.out <- print(ltable, sanitize.colnames.function=janitor, print.results=F, include.rownames=F)
| /R/final_model_results.R | no_license | aittomaki/dippa-analyysi | R | false | false | 9,526 | r | # Script for processing results from final model simulations
library(rstanarm)
library(xtable)
library(ggplot2)
library(reshape2)
library(plyr)
theme_set(theme_bw())
# Helper function for conversion from df to matrix
matrix.plz <- function(x) {
m <- as.matrix(x[,-1])
rownames(m) <- as.character(x[,1])
m
}
# Input, params and output
if(exists("param1")) { # Anduril
RESULTDIR <- param1
thresh <- param2 # e.g. U0.2_a0.9
PLOTDIR <- document.dir
LATEX.CSV <- get.output(cf,'optOut1')
COEFS.CSV <- get.output(cf,'optOut2')
cv.chosen.vars <- table1
prot <- table2
gene <- table3
mirna <- table4
rm(optOut1)
rm(optOut2)
} else { # non-Anduril
RESULTDIR <- "/home/viljami/wrk/finalmodel_results"
PLOTDIR <- "/home/viljami/wrk/finalmodel_results/plots"
OUTFILE <- "/home/viljami/wrk/finalmodel_results/foo.csv"
LATEX.CSV <- "/home/viljami/wrk/finalmodel_results/ltable.csv"
COEFS.CSV <- "/home/viljami/wrk/finalmodel_results/coefs.csv"
WRKDIR <- Sys.getenv("WRKDIR")
cv.chosen.vars <- read.delim(file.path(WRKDIR,"dippa-data","n_chosen_variables.csv"))
prot <- read.delim(file.path(WRKDIR,"dippa-data","protein_normalized.csv"))
gene <- read.delim(file.path(WRKDIR,"dippa-data","gene_normalized.csv"))
mirna <- read.delim(file.path(WRKDIR,"dippa-data","mirna_normalized.csv"))
if(!dir.exists(PLOTDIR)) dir.create(PLOTDIR)
thresh <- "U0.2_a0.5"
}
SIGDIGS <- 3
# Sanity checking
if(!(thresh %in% colnames(cv.chosen.vars)))
stop(sprintf("Threshold '%s' not available!", thresh))
# Convert experssion data into matrices
samples <- as.character(prot[,1])
prot <- matrix.plz(prot)[samples,]
gene <- matrix.plz(gene)[samples,]
mirna <- matrix.plz(mirna)[samples,]
# Get result file names
resfiles <- list.files(RESULTDIR, pattern = "*.rda")
# Get gene names from filenames
genes <- sub("finalmodel-\\d+-(\\w+).rda", "\\1", resfiles)
# Sort into alphabetical order by gene
gord <- order(genes)
genes <- genes[gord]
resfiles <- resfiles[gord]
rm(gord)
# Result table data frame
d <- data.frame(gene=genes, R2_gene=0, R2adj_gene=0,
R2_full=NA, R2adj_full=NA, full_model_found="",
gene_only_significant="", gene_full_significant="",
n_miRNAs=0, chosen_miRNAs="",
n_significant_miRNAs=0, significant_miRNAs="", significant_miRNA_inds="", stringsAsFactors=F)
# Data frame for coefs for all models (compile as a list)
d.coefs <- list()
for (i in 1:length(resfiles)) {
#for (i in 1:2) {
f <- resfiles[i]
g <- genes[i]
# Load CV results for current gene
load(file.path(RESULTDIR,f))
# Get number of covariates chosen in CV (includes gene!)
n_vars <- cv.chosen.vars[match(g, cv.chosen.vars$gene), thresh]
if(!is.na(n_vars) && n_vars > params$n_vars) {
warning(sprintf("%s : n_vars too big (%d > %d)!",g, n_vars, params$n_vars))
n_vars <- NA
}
d[i,"n_miRNAs"] <- max(n_vars - 1, 0)
d[i,"full_model_found"] <- ifelse(!is.na(n_vars), "yes", "no")
# Gene only model results
d[i,"R2_gene"] <- r2.gene["R2"]
d[i,"R2adj_gene"] <- r2.gene["R2.adjusted"]
# Check if gene significant for gene only model
postsum.gene <- posterior_interval(fit.gene, prob = 0.95)
signifs <- sign(postsum.gene["G",1]) == sign(postsum.gene["G",2])
d[i,"gene_only_significant"] <- ifelse(signifs, "yes", "no")
# Full model results
if(!is.na(n_vars)) {
if(n_vars > 1) {
# Get names of chosen mirnas
mirna_vars <- spath$chosen[3:(n_vars+1)]-2 #1 is constant, 2 is gene
mirna_names <- colnames(mirna)[mirna_vars]
mirna_names <- gsub("\\.", "-", sub("\\.$", "\\*", mirna_names))
d[i,"chosen_miRNAs"] <- paste(mirna_names, collapse=",")
x <- cbind(1, gene[,g], mirna[,mirna_vars])
} else {
# no miRNAs chosen
mirna_names <- c()
x <- cbind(1, gene[,g])
}
# Projected w's weren't computed for w0+g-models!
# So R2's can't be computed (instead NA).
# If that is corrected, then remove this if (and else)!
if(n_vars > 1) {
# Create regression matrices
x <- x[,1:(n_vars+1)] # drops gene if only constant selected
y <- prot[,g]
n <- length(y)
# Get projected weights for chosen mirnas
w <- as.matrix(spath$w[[n_vars+1]][spath$chosen[1:(n_vars+1)],])
# Compute predicted prot expr
ypred <- x %*% w
# Compute R2 and adjusted R2
R2 <- 1 - colSums((y-ypred)^2)/sum((y-mean(y))^2)
R2.adj <- 1 - (1-R2)*(n-1)/(n-n_vars+1)
resid <- y-ypred
R2.var <- 1-apply(resid,2,var)/var(y)
R2.var.adj <- 1 - (1-R2.var)*(n-1)/(n-n_vars+1)
d[i,"R2_full"] <- median(R2)
d[i,"R2adj_full"] <- median(R2.adj)
# Posterior summary for projected weights
postsum <- t(apply(w, 1, quantile, probs=c(0.025,0.1,0.25,0.5,0.75,0.9,0.975)))
rownames(postsum) <- c("w0", "gene", mirna_names)[1:(n_vars+1)]
sdev <- as.vector(apply(w, 1, sd))
wgt <- as.vector(apply(w, 1, function(x) {ifelse(median(x)>0, sum(x>0)/length(x), sum(x<0)/length(x))}))
signifs <- sign(postsum[,"2.5%"]) == sign(postsum[,"97.5%"])
cfs <- data.frame(gene=g, variable=rownames(postsum), median=postsum[,"50%"], IQR=postsum[,"75%"]-postsum[,"25%"], sd=sdev, significant=ifelse(signifs, "yes", "no"), weight=wgt)
d.coefs <- c(d.coefs, list(cfs))
if(n_vars > 0) {
d[i,"gene_full_significant"] <- ifelse(signifs["gene"], "yes", "no")
if(n_vars > 1) {
d[i,"n_significant_miRNAs"] <- sum(signifs[3:length(signifs)])
d[i,"significant_miRNAs"] <- paste(grep("miR",rownames(postsum)[signifs], value=T), collapse=",")
d[i,"significant_miRNA_inds"] <- paste(which(signifs[3:length(signifs)]), collapse=",")
}
}
} else {
d[i,"R2_full"] <- NA
d[i,"R2adj_full"] <- NA
}
}
}
# Output
table.out <- d
if(!exists("param1")) { # non-Anduril
write.table(table.out, file=OUTFILE, sep="\t", row.names=F)
}
d.coefs <- do.call(rbind, d.coefs)
write.table(d.coefs, file=COEFS.CSV, sep="\t", row.names=F)
# Make a version of the table for latex in Anduril
numf <- paste0("%.",SIGDIGS,"f")
dlat <- d[,c("gene","R2_gene","R2_full","n_miRNAs","significant_miRNAs")]
# Combine n_miRNAs and n_significant_miRNAs
nonNA <- !is.na(dlat$n_miRNAs) & dlat$n_miRNAs > 0
dlat$n_miRNAs[nonNA] <- paste0(d$n_miRNAs, " (", d$n_significant_miRNAs, ")")[nonNA]
# Add gene significants as asterisks
gene_only_sigs <- revalue(d$gene_only_significant, c(yes="$^{\\ast}$", no=""))
gene_full_sigs <- revalue(d$gene_full_significant, c(yes="$^{\\ast}$", no=""))
dlat$R2_gene <- paste0(sprintf(numf, d$R2_gene), " (", sprintf(numf, d$R2adj_gene), ")", gene_only_sigs)
dlat$R2_full[nonNA] <- paste0(sprintf(numf, d$R2_full), " (", sprintf(numf, d$R2adj_full), ")", gene_full_sigs)[nonNA]
# Round R2_adjs
write.table(dlat, file=LATEX.CSV, sep="\t", row.names=F)
# Make a plot of var num vs R2 and num signif
# #dm <- subset(d, select=c(gene, n_miRNAs, n_significant_miRNAs, R2_full, R2_full_adj))
# dm <- subset(d, select=c(gene, n_miRNAs, n_significant_miRNAs, R2_full))
# dm$delta_R2adj <- d$R2adj_full - d$R2adj_gene
# dm <- melt(dm, id.vars=c("gene","n_miRNAs"))
# #dm$adjusted[dm$variable == "R2_full"] <- "no"
# #dm$adjusted[dm$variable == "R2_full_adj"] <- "yes"
# #dm$adjusted[dm$variable == "n_significant_miRNAs"] <- "NA"
# #dm$variable[dm$variable == "R2_full_adj"] <- "R2_full"
# # Pretty the variable names
# dm$variable <- revalue(dm$variable, c(
# n_significant_miRNAs = "N~significant~miRNAs",
# R2_full = "R[full]^2",
# delta_R2adj = "Delta~bar(R)^2"
# ))
# g <- ggplot(dm, aes(x = n_miRNAs, y = value))#, color=adjusted, size=adjusted))
# g <- g + geom_point(alpha=0.6) + geom_line(stat="smooth", method="loess", alpha=0.3)
# g <- g + geom_ribbon(stat="smooth", method="loess", alpha=0.05)##, aes(color=NULL, group=adjusted))
# g <- g + facet_grid(variable ~ ., scales = "free", switch="y", labeller=label_parsed)
# #g <- g + scale_size_manual(values=c(1,1,0.5), guide=F)
# #g <- g + scale_color_discrete(name="R2 adjusted", breaks=c("no","yes"))
# #g <- g + ggtitle(parse(text=sub("U(.*)_a(.*)", "alpha:\\2~~~gamma:\\1", thresh)))
# g <- g + labs(x="N miRNAs", y=NULL)
# g <- g + theme(strip.text.y = element_text(size = 12))
# plot.file <- file.path(PLOTDIR, sprintf("n_miRNAs_R2s_%s.pdf", thresh))
# ggsave(plot.file, g, height=7, width=9, dpi=600)
# Make a latex table version in R (NOT USED)
dltx <- dlat
#colnames(dltx) <- c("Gene","$R^2_{gene}$","$R^2_{full}$","$\\bar{R}^2_{gene}$","$\\bar{R}^2_{full}$","$N_{miRNA}$","Chosen miRNAs")
#ltable <- xtable(dltx, align="llrrrrrp{5cm}", digits=SIGDIGS, caption="Properties of final regression models.")
colnames(dltx) <- c("Gene","$R^2_{gene}$","$R^2_{full}$","$N_{miRNA}$","Chosen miRNAs")
ltable <- xtable(dltx, align="llllrp{8cm}", digits=SIGDIGS, caption="Properties of final regression models.")
janitor <- function(x){paste0('{\\textbf{', x,'}}')}
document.out <- print(ltable, sanitize.colnames.function=janitor, print.results=F, include.rownames=F)
|
#require(detrendeR)
#detrender()
# Restrict a year-indexed data frame to the window [first.year, last.year].
# Rows are matched on their rownames, which are interpreted as calendar
# years; the two bounds may be supplied in either order. Rows whose
# rowname does not parse as an integer are dropped (as subset() did).
y = function(x, first.year, last.year){
  lo <- min(first.year, last.year)
  hi <- max(first.year, last.year)
  yrs <- as.integer(rownames(x))
  keep <- !is.na(yrs) & yrs >= lo & yrs <= hi
  x[keep, , drop = FALSE]
}
# s: interactive column (series) selection for a year-indexed data frame.
# A Tk dialog lists every column of x together with its first year, last
# year and span (computed from the rownames). The function returns x
# reduced to the selected columns and to the years whose (NA-removed) row
# sum over the kept series is positive; with no selection it returns
# invisible() (i.e. NULL, invisibly).
s = function (x){
# Customised clone of utils::select.list's Tk dialog: fixed-width font,
# a header line above the list, double-click to accept, and Ctrl-a /
# Ctrl-x bindings for select-all / select-none. Returns the chosen
# strings ("" when nothing was chosen in single-selection mode).
 tk.select.list = function (choices, preselect = NULL, multiple = FALSE, title = NULL)
{
    fontFixedWidth <- tkfont.create(family="courier",size=9)
    # Themed (ttk) widgets need Tcl/Tk >= 8.5; fall back to classic tkbutton
    have_ttk <- as.character(tcl("info", "tclversion")) >= "8.5"
    if (!have_ttk)
        ttkbutton <- tkbutton
    lvar <- tclVar()
    tclObj(lvar) <- choices
    # Suspend Tk screen updates while the dialog is being assembled
    oldmode <- tclServiceMode(FALSE)
    dlg <- tktoplevel()
    tkwm.resizable(dlg, 0, 0)
    tkwm.title(dlg, title)
    tkwm.deiconify(dlg)
    tkgrab.set(dlg)
    tkfocus(dlg)
   # if (!is.null(title) && nzchar(title)) {
   #     lab <- if (have_ttk)
   #         ttklabel(dlg, text = title, foreground = "blue")
   #     else tklabel(dlg, text = title, fg = "blue")
   #     tkpack(lab, side = "top")
   # }
    # Header row aligned with the fixed-width series strings listed below
    lab1<-ttklabel(dlg, text = "Series         First  Last  Span  ",font= fontFixedWidth)
    tkpack(lab1, side = "top")
    # OK: record the highlighted entries into the enclosing scope and close
    onOK <- function() {
        res <- 1L + as.integer(tkcurselection(box))
        ans.select_list <<- choices[res]
        tkgrab.release(dlg)
        tkdestroy(dlg)
    }
    onCancel <- function() {
        tkgrab.release(dlg)
        tkdestroy(dlg)
    }
    buttons <- tkframe(dlg)
    tkpack(buttons, side = "bottom")
    OK <- ttkbutton(buttons, text = gettext("OK"), width = 6,
        command = onOK)
    Cancel <- ttkbutton(buttons, text = gettext("Cancel"), command = onCancel)
    tkpack(OK, Cancel, side = "left", fill = "x", padx = "2m")
    # Size the list to the screen: first with a rough 20px-per-row guess,
    # then refined using the actual font "linespace" metric.
    scht <- as.numeric(tclvalue(tkwinfo("screenheight", dlg))) -
        200L
    ht <- min(length(choices), scht%/%20)
    box <- tklistbox(dlg, height = ht, listvariable = lvar, bg = "white",
        setgrid = 1, selectmode = ifelse(multiple, "multiple",
            "single"))
    tmp <- tcl("font", "metrics", tkcget(box, font = NULL))
    tmp <- as.numeric(sub(".*linespace ([0-9]+) .*", "\\1", tclvalue(tmp))) +
        3
    ht <- min(length(choices), scht%/%tmp)
    tkdestroy(box)
    # Rebuild the list box, adding a scrollbar only when it cannot show
    # every choice at once
    if (ht < length(choices)) {
        scr <- if (have_ttk)
            ttkscrollbar(dlg, command = function(...) tkyview(box,
                ...))
        else tkscrollbar(dlg, repeatinterval = 5, command = function(...) tkyview(box,
            ...))
        box <- tklistbox(dlg, height = ht, width = 0, listvariable = lvar,
            bg = "white", setgrid = 1, selectmode = ifelse(multiple,
                "multiple", "single"),font=fontFixedWidth, yscrollcommand = function(...) tkset(scr,
                ...))
        tkpack(box, side = "left", fill = "both", expand = TRUE)
        tkpack(scr, side = "right", fill = "y")
    }
    else {
        box <- tklistbox(dlg, height = ht, width = 0, listvariable = lvar,
            bg = "white",font=fontFixedWidth, selectmode = ifelse(multiple, "multiple",
                "single"))
        tkpack(box, side = "left", fill = "both")
    }
    # Pre-highlight entries (Tk list indices are 0-based, hence the - 1L)
    preselect <- match(preselect, choices)
    preselect <- preselect[preselect > 0L] - 1L
    if (length(preselect)) {
        for (i in preselect) tkselection.set(box, i)
        tkyview(box, preselect[1L])
    }
    ans.select_list <- character()
    # Keyboard shortcuts: select every entry / clear the selection
    all = function () for (i in 1:length(choices)) tkselection.set(box, i-1)
    none = function () for (i in 1:length(choices)) tkselection.clear(box, i-1)
    tkbind(dlg, "<Destroy>", onCancel)
    tkbind(dlg, "<Double-ButtonPress-1>", onOK)
    tkbind(box, "<Control-a>", all)
    tkbind(box, "<Control-x>", none)
    tkfocus(box)
    tclServiceMode(oldmode)
    # Block until the dialog is closed (OK, Cancel or window destroy)
    tkwait.window(dlg)
    Sys.sleep(0.1)
    if (!multiple && !length(ans.select_list))
        ans.select_list <- ""
    ans.select_list
}
# First/last year covered by one series, read off the (year) names of its
# non-NA entries
 yr.range = function(x) {
	yr.vec = as.numeric(names(x))
	mask = !is.na(x)
	range(yr.vec[mask])
}
# Fixed-width "first last span" display string for one series (column)
 info.fun = function(x) {
first<-yr.range(x)[1]
last<-yr.range(x)[2]
paste(format(first,width=6, justify="right"), format(last,width=6), format(last-first+1,width=6), "  ")
}
# One display line per column: padded column name plus its year statistics
series<-paste( "",format(colnames(x), width=10),apply(x,2,info.fun), sep=" ")
selected.series<-tk.select.list(series, multiple=TRUE,preselect=series,title="Select the series to keep")
x[,series%in%selected.series]->temp
# Nothing selected: return invisibly with no value
if(sum(series%in%selected.series)==0) return (invisible())
# Exactly one column kept: `[` above dropped to a vector, so rebuild the
# one-column data frame, restore its dimnames by hand, and trim the years
# whose (NA-removed) row sum is not positive.
if(sum(series%in%selected.series)==1) {data.frame(x[,series%in%selected.series])->temp
colnames(temp)<-colnames(x)[series%in%selected.series]
rownames(temp)<-rownames(x)
apply(temp,1,sum, na.rm=T)->years
data.frame(temp[years>0,])->TEMP
colnames(TEMP)<-colnames(temp)
rownames(TEMP)<-rownames(temp)[years>0]
return(TEMP)
}
# Several columns kept: drop the years where the kept series' row sum
# (NAs removed) is not positive
apply(temp,1,sum, na.rm=T)->years
temp[years>0,]
}
#data(co021)
#trimCol(co021)
| /detrendeR/R/CodeR.r | no_license | ingted/R-Examples | R | false | false | 4,951 | r | #require(detrendeR)
#detrender()
y = function(x, first.year, last.year){
if (last.year<first.year) { temp<-first.year; first.year<-last.year ; last.year<-temp}
subset(x, as.integer(rownames(x))>=first.year & as.integer(rownames(x))<=last.year)
}
s = function (x){
tk.select.list = function (choices, preselect = NULL, multiple = FALSE, title = NULL)
{
fontFixedWidth <- tkfont.create(family="courier",size=9)
have_ttk <- as.character(tcl("info", "tclversion")) >= "8.5"
if (!have_ttk)
ttkbutton <- tkbutton
lvar <- tclVar()
tclObj(lvar) <- choices
oldmode <- tclServiceMode(FALSE)
dlg <- tktoplevel()
tkwm.resizable(dlg, 0, 0)
tkwm.title(dlg, title)
tkwm.deiconify(dlg)
tkgrab.set(dlg)
tkfocus(dlg)
# if (!is.null(title) && nzchar(title)) {
# lab <- if (have_ttk)
# ttklabel(dlg, text = title, foreground = "blue")
# else tklabel(dlg, text = title, fg = "blue")
# tkpack(lab, side = "top")
# }
lab1<-ttklabel(dlg, text = "Series First Last Span ",font= fontFixedWidth)
tkpack(lab1, side = "top")
onOK <- function() {
res <- 1L + as.integer(tkcurselection(box))
ans.select_list <<- choices[res]
tkgrab.release(dlg)
tkdestroy(dlg)
}
onCancel <- function() {
tkgrab.release(dlg)
tkdestroy(dlg)
}
buttons <- tkframe(dlg)
tkpack(buttons, side = "bottom")
OK <- ttkbutton(buttons, text = gettext("OK"), width = 6,
command = onOK)
Cancel <- ttkbutton(buttons, text = gettext("Cancel"), command = onCancel)
tkpack(OK, Cancel, side = "left", fill = "x", padx = "2m")
scht <- as.numeric(tclvalue(tkwinfo("screenheight", dlg))) -
200L
ht <- min(length(choices), scht%/%20)
box <- tklistbox(dlg, height = ht, listvariable = lvar, bg = "white",
setgrid = 1, selectmode = ifelse(multiple, "multiple",
"single"))
tmp <- tcl("font", "metrics", tkcget(box, font = NULL))
tmp <- as.numeric(sub(".*linespace ([0-9]+) .*", "\\1", tclvalue(tmp))) +
3
ht <- min(length(choices), scht%/%tmp)
tkdestroy(box)
if (ht < length(choices)) {
scr <- if (have_ttk)
ttkscrollbar(dlg, command = function(...) tkyview(box,
...))
else tkscrollbar(dlg, repeatinterval = 5, command = function(...) tkyview(box,
...))
box <- tklistbox(dlg, height = ht, width = 0, listvariable = lvar,
bg = "white", setgrid = 1, selectmode = ifelse(multiple,
"multiple", "single"),font=fontFixedWidth, yscrollcommand = function(...) tkset(scr,
...))
tkpack(box, side = "left", fill = "both", expand = TRUE)
tkpack(scr, side = "right", fill = "y")
}
else {
box <- tklistbox(dlg, height = ht, width = 0, listvariable = lvar,
bg = "white",font=fontFixedWidth, selectmode = ifelse(multiple, "multiple",
"single"))
tkpack(box, side = "left", fill = "both")
}
preselect <- match(preselect, choices)
preselect <- preselect[preselect > 0L] - 1L
if (length(preselect)) {
for (i in preselect) tkselection.set(box, i)
tkyview(box, preselect[1L])
}
ans.select_list <- character()
all = function () for (i in 1:length(choices)) tkselection.set(box, i-1)
none = function () for (i in 1:length(choices)) tkselection.clear(box, i-1)
tkbind(dlg, "<Destroy>", onCancel)
tkbind(dlg, "<Double-ButtonPress-1>", onOK)
tkbind(box, "<Control-a>", all)
tkbind(box, "<Control-x>", none)
tkfocus(box)
tclServiceMode(oldmode)
tkwait.window(dlg)
Sys.sleep(0.1)
if (!multiple && !length(ans.select_list))
ans.select_list <- ""
ans.select_list
}
yr.range = function(x) {
yr.vec = as.numeric(names(x))
mask = !is.na(x)
range(yr.vec[mask])
}
info.fun = function(x) {
first<-yr.range(x)[1]
last<-yr.range(x)[2]
paste(format(first,width=6, justify="right"), format(last,width=6), format(last-first+1,width=6), " ")
}
series<-paste( "",format(colnames(x), width=10),apply(x,2,info.fun), sep=" ")
selected.series<-tk.select.list(series, multiple=TRUE,preselect=series,title="Select the series to keep")
x[,series%in%selected.series]->temp
if(sum(series%in%selected.series)==0) return (invisible())
if(sum(series%in%selected.series)==1) {data.frame(x[,series%in%selected.series])->temp
colnames(temp)<-colnames(x)[series%in%selected.series]
rownames(temp)<-rownames(x)
apply(temp,1,sum, na.rm=T)->years
data.frame(temp[years>0,])->TEMP
colnames(TEMP)<-colnames(temp)
rownames(TEMP)<-rownames(temp)[years>0]
return(TEMP)
}
apply(temp,1,sum, na.rm=T)->years
temp[years>0,]
}
#data(co021)
#trimCol(co021)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutar.R
\name{mutar}
\alias{filtar}
\alias{mutar}
\alias{sumar}
\title{Tools for Data Frames}
\usage{
mutar(x, i, j, ..., by, sby, drop)
filtar(x, i)
sumar(x, ..., by)
}
\arguments{
\item{x}{(DataFrame | data.frame)}
\item{i}{(logical | numeric | integer | OneSidedFormula | TwoSidedFormula |
FormulaList) see the examples.}
\item{j}{(logical | character | TwoSidedFormula | FormulaList | function)
character beginning with '^' are interpreted as regular expression}
\item{...}{arbitrary number of args
\cr in \code{[} (TwoSidedFormulas)
\cr in constructor see \link[tibble]{data_frame}}
\item{by}{(character) variable names used in \link{group_by}. Using `sby`
triggers a summarise.}
\item{sby}{(character) variable names used in \link{group_by}. Using `sby`
triggers a summarise.}
\item{drop}{(ignored) never drops the class.}
}
\description{
\code{mutar} is literally the same function as \code{[.DataFrame} and can be
used as a generic interface to dplyr. Other functions here listed are a
convenience to mimic dplyr's syntax in a \code{R CMD check} friendly way.
These functions can also be used with S4 data.frame(s) / data_frame(s) /
data.table(s). They will always preserve the input class.
}
\details{
The real workhorse of this interface is \code{mutar}. All other functions
exist to ease the transition from dplyr.
\code{OneSidedFormula} is always used for subsetting rows.
\code{TwoSidedFormula} is used instead of name-value expressions in
\link[dplyr]{summarise} and \link[dplyr]{mutate}.
\code{FormulaList} can be used to repeat the same operation on different
columns.
}
\examples{
data("airquality")
airquality \%>\%
filtar(~Month > 4) \%>\%
mutar(meanWind ~ mean(Wind), by = "Month") \%>\%
sumar(meanWind ~ mean(Wind), by = "Month") \%>\%
extract("meanWind")
airquality \%>\%
sumar(
FL(.n ~ mean(.n), .n = c("Wind", "Temp")),
by = "Month"
)
}
\seealso{
\link{extract}, \link{DataFrame}, \link{FL}
}
| /man/mutar.Rd | no_license | rlugojr/dat | R | false | true | 2,028 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutar.R
\name{mutar}
\alias{filtar}
\alias{mutar}
\alias{sumar}
\title{Tools for Data Frames}
\usage{
mutar(x, i, j, ..., by, sby, drop)
filtar(x, i)
sumar(x, ..., by)
}
\arguments{
\item{x}{(DataFrame | data.frame)}
\item{i}{(logical | numeric | integer | OneSidedFormula | TwoSidedFormula |
FormulaList) see the examples.}
\item{j}{(logical | character | TwoSidedFormula | FormulaList | function)
character beginning with '^' are interpreted as regular expression}
\item{...}{arbitrary number of args
\cr in \code{[} (TwoSidedFormulas)
\cr in constructor see \link[tibble]{data_frame}}
\item{by}{(character) variable names used in \link{group_by}. Using `sby`
triggers a summarise.}
\item{sby}{(character) variable names used in \link{group_by}. Using `sby`
triggers a summarise.}
\item{drop}{(ignored) never drops the class.}
}
\description{
\code{mutar} is literally the same function as \code{[.DataFrame} and can be
used as a generic interface to dplyr. Other functions here listed are a
convenience to mimic dplyr's syntax in a \code{R CMD check} friendly way.
These functions can also be used with S4 data.frame(s) / data_frame(s) /
data.table(s). They will always preserve the input class.
}
\details{
The real workhorse of this interface is \code{mutar}. All other functions
exist to ease the transition from dplyr.
\code{OneSidedFormula} is always used for subsetting rows.
\code{TwoSidedFormula} is used instead of name-value expressions in
\link[dplyr]{summarise} and \link[dplyr]{mutate}.
\code{FormulaList} can be used to repeat the same operation on different
columns.
}
\examples{
data("airquality")
airquality \%>\%
filtar(~Month > 4) \%>\%
mutar(meanWind ~ mean(Wind), by = "Month") \%>\%
sumar(meanWind ~ mean(Wind), by = "Month") \%>\%
extract("meanWind")
airquality \%>\%
sumar(
FL(.n ~ mean(.n), .n = c("Wind", "Temp")),
by = "Month"
)
}
\seealso{
\link{extract}, \link{DataFrame}, \link{FL}
}
|
#' @title Check arguments for \code{plspm} and \code{plspm.fit}
#'
#' @details
#' Internal function. \code{check_args} is called by \code{plspm}.
#'
#' @param Data numeric matrix or data frame containing the manifest variables.
#' @param path_matrix square (lower triangular) boolean matrix for inner model.
#' @param blocks List of vectors (either numeric or character) to specify
#' the blocks of manifest variables asociated to the latent variables.
#' @param scaling optional list of string vectors indicating the type of
#' measurement scale for each manifest variable specified in \code{blocks}.
#' @param modes character indicating the type of measurement.
#' @param scheme string indicating the type of inner weighting scheme.
#' @param scaled logical indicating whether scaling data is performed.
#' @param tol decimal value indicating the tol criterion for covergence.
#' @param maxiter integer indicating the maximum number of iterations.
#' @param plscomp optional vector indicating the number of PLS components
#' (for each block) to be used when handling non-metric data
#' (only used if \code{scaling} is provided)
#' @param boot.val whether bootstrap validation is performed.
#' @param br integer indicating the number bootstrap resamples.
#' @param dataset whether the data matrix should be retrieved.
#' @return list of validated arguments
#' @keywords internal
#' @template internals
#' @export
check_args <- function(Data, path_matrix, blocks, scaling, modes, scheme,
                       scaled, tol, maxiter, plscomp, boot.val, br, dataset)
{
  ## validate each component of the model specification; every helper
  ## stops with an informative error message on invalid input
  Data <- check_data(Data)
  path_matrix <- check_path(path_matrix)
  blocks <- check_blocks(blocks, Data)
  specs <- check_specs(blocks, scaling, modes, scheme, scaled,
                       tol, maxiter, plscomp)
  boot_args <- check_boot(boot.val, br)
  ## a non-logical 'dataset' silently falls back to TRUE
  if (!is.logical(dataset)) dataset <- TRUE
  ## verify that the inner model matches the outer model
  ## (called for its side effect: it stops on a mismatch)
  check_model(path_matrix, blocks)
  ## return the validated arguments as a named list
  list(Data = Data,
       path_matrix = path_matrix,
       blocks = blocks,
       specs = specs,
       boot.val = boot_args$boot.val,
       br = boot_args$br,
       dataset = dataset)
}
#' @title Check Data
#'
#' @details
#' Internal function. \code{check_data} is called by \code{check_args}.
#'
#' @param Data numeric matrix or data frame containing the manifest variables.
#' @return validated Data
#' @keywords internal
#' @template internals
#' @export
check_data <- function(Data) {
  ## 'Data' must be rectangular: a matrix or a data frame
  if (is_not_tabular(Data))
    stop("\nInvalid 'Data'. Must be a matrix or data frame.")
  ## matrices must be numeric (data frames may mix column types)
  if (is.matrix(Data) && !is.numeric(Data))
    stop("\nInvalid 'Data' matrix. Must be a numeric matrix.")
  ## at least two rows and two columns are required
  if (nrow(Data) == 1)
    stop("\nCannot work with only one row in 'Data'")
  if (ncol(Data) == 1)
    stop("\nCannot work with only one column in 'Data'")
  ## supply default dimnames when they are absent
  if (lacks_rownames(Data))
    rownames(Data) <- seq_len(nrow(Data))
  if (lacks_colnames(Data))
    colnames(Data) <- paste0("MV", seq_len(ncol(Data)))
  Data
}
#' @title Check path matrix
#'
#' @details
#' Internal function. \code{check_path} is called by \code{check_args}.
#'
#' @param path_matrix square (lower triangular) boolean matrix for inner model
#' @return validated path matrix
#' @keywords internal
#' @template internals
#' @export
check_path <- function(path_matrix)
{
  if (is_not_matrix(path_matrix))
    stop("\n'path_matrix' must be a matrix.")
  if (!is_square_matrix(path_matrix))
    stop("\n'path_matrix' must be a square matrix.")
  if (nrow(path_matrix) == 1)
    stop("\n'path_matrix' must have more than one row")
  if (!is_lower_triangular(path_matrix))
    stop("\n'path_matrix' must be a lower triangular matrix")
  ## vectorized check (replaces the original cell-by-cell double loop):
  ## every entry must be 0 or 1; NA entries fail as well, since %in%
  ## never returns NA
  if (!all(path_matrix %in% c(1, 0)))
    stop("\nElements in 'path_matrix' must be '1' or '0'")
  ## supply default LV names when dimnames are missing, and mirror
  ## row names to column names (or vice versa) when only one is set
  if (lacks_dimnames(path_matrix)) {
    LV_names = paste0("LV", seq_len(ncol(path_matrix)))
    dimnames(path_matrix) = list(LV_names, LV_names)
  }
  if (has_rownames(path_matrix) && lacks_colnames(path_matrix)) {
    colnames(path_matrix) = rownames(path_matrix)
  }
  if (has_colnames(path_matrix) && lacks_rownames(path_matrix)) {
    rownames(path_matrix) = colnames(path_matrix)
  }
  # return
  path_matrix
}
#' @title Check well defined blocks
#'
#' @details
#' Internal function. \code{check_blocks} is called by \code{check_args}.
#'
#' @param blocks list defining the blocks of manifest variables. Elements in
#' \code{blocks} must have the same mode: all "numeric" or all "character".
#' @param Data matrix or data frame from where to extract manifest variables.
#' @return validated blocks (output in numeric format)
#' @keywords internal
#' @template internals
#' @export
check_blocks <- function(blocks, Data)
{
  if (!is.list(blocks))
    stop("\n'blocks' must be a list.")
  ## an empty list previously crashed below with the cryptic
  ## "argument is of length zero"; fail with a clear message instead
  if (length(blocks) == 0L)
    stop("\n'blocks' must contain at least one block")
  # no duplicated elements within each block
  mvs_duplicated = unlist(lapply(blocks, duplicated))
  if (any(mvs_duplicated))
    stop("\nWrong 'blocks'. Duplicated variables in a block are not allowed")
  # all elements in blocks must share one mode
  mvs_mode = unique(unlist(lapply(blocks, mode)))
  if (length(mvs_mode) > 1)
    stop("\nAll elements in 'blocks' must have the same mode")
  ## anything other than column indices or column names (e.g. logical
  ## blocks) used to fall through unvalidated; reject it explicitly
  if (!mvs_mode %in% c("numeric", "character"))
    stop("\nElements in 'blocks' must be numeric indices or column names")
  if (mvs_mode == "numeric") {
    # numeric blocks: indices must fall inside the columns of Data
    blocks_in_data = match(unlist(blocks), seq_len(ncol(Data)))
    if (any(is.na(blocks_in_data)))
      stop("\nIndices in 'blocks' outside the number of columns in 'Data'")
  } else {
    # character blocks: translate column names to column positions
    data_names = colnames(Data)
    matched_names = match(unlist(blocks), data_names)
    if (any(is.na(matched_names))) {
      bad_names = unlist(blocks)[is.na(matched_names)]
      ## toString() so that several unmatched names are reported in one
      ## readable message (stop() on a length>1 vector ran them together)
      stop(sprintf("\nUnrecognized name(s) in 'blocks': %s",
                   toString(sQuote(bad_names))))
    }
    blocks = lapply(blocks, match, table = data_names)
  }
  # output: blocks as numeric column positions
  blocks
}
#' @title Check bootstrap options
#'
#' @details
#' Internal function. \code{check_boot} is called by \code{check_args}.
#'
#' @param boot.val logical indicating whether to perform bootstrapping
#' @param br number of bootstrap resamples
#' @return validated bootstrap options
#' @keywords internal
#' @template internals
#' @export
check_boot <- function(boot.val, br)
{
  ## a non-logical 'boot.val' disables bootstrapping altogether
  if (!is.logical(boot.val)) boot.val = FALSE
  if (boot.val) {
    if (is.null(br)) {
      ## no resample count supplied: use the default
      br = 100
    } else {
      ## test the length FIRST so the scalar conditions that follow
      ## never receive a non-scalar value (`||` requires length-one
      ## operands; the original order could error on a vector 'br')
      if (length(br) != 1L || !is_positive_integer(br) || br < 10) {
        warning("Warning: Invalid argument 'br'. Default 'br=100' is used.")
        br = 100
      }
    }
  }
  # return
  list(boot.val = boot.val, br = br)
}
#' @title Check congruence between inner and outer models
#'
#' @details
#' Internal function. \code{check_model} is called by \code{check_args}.
#'
#' @param path_matrix matrix specifying the path connections
#' @param blocks list defining the blocks of manifest variables
#' @return an error if there is something wrong
#' @keywords internal
#' @template internals
#' @export
check_model <- function(path_matrix, blocks) {
  # the inner model must have exactly one row per block of manifest
  # variables in the outer model; stop otherwise
  if (nrow(path_matrix) != length(blocks))
    stop("\nNumber of rows in 'path_matrix' different from length of 'blocks'.")
  TRUE
}
| /R/check_arguments.r | no_license | gastonstat/plspm | R | false | false | 7,418 | r | #' @title Check arguments for \code{plspm} and \code{plspm.fit}
#'
#' @details
#' Internal function. \code{check_args} is called by \code{plspm}.
#'
#' @param Data numeric matrix or data frame containing the manifest variables.
#' @param path_matrix square (lower triangular) boolean matrix for inner model.
#' @param blocks List of vectors (either numeric or character) to specify
#' the blocks of manifest variables asociated to the latent variables.
#' @param scaling optional list of string vectors indicating the type of
#' measurement scale for each manifest variable specified in \code{blocks}.
#' @param modes character indicating the type of measurement.
#' @param scheme string indicating the type of inner weighting scheme.
#' @param scaled logical indicating whether scaling data is performed.
#' @param tol decimal value indicating the tol criterion for covergence.
#' @param maxiter integer indicating the maximum number of iterations.
#' @param plscomp optional vector indicating the number of PLS components
#' (for each block) to be used when handling non-metric data
#' (only used if \code{scaling} is provided)
#' @param boot.val whether bootstrap validation is performed.
#' @param br integer indicating the number bootstrap resamples.
#' @param dataset whether the data matrix should be retrieved.
#' @return list of validated arguments
#' @keywords internal
#' @template internals
#' @export
check_args <- function(Data, path_matrix, blocks, scaling, modes, scheme,
                       scaled, tol, maxiter, plscomp, boot.val, br, dataset)
{
  ## validate each component of the model specification; every helper
  ## stops with an informative error message on invalid input
  Data <- check_data(Data)
  path_matrix <- check_path(path_matrix)
  blocks <- check_blocks(blocks, Data)
  specs <- check_specs(blocks, scaling, modes, scheme, scaled,
                       tol, maxiter, plscomp)
  boot_args <- check_boot(boot.val, br)
  ## a non-logical 'dataset' silently falls back to TRUE
  if (!is.logical(dataset)) dataset <- TRUE
  ## verify that the inner model matches the outer model
  ## (called for its side effect: it stops on a mismatch)
  check_model(path_matrix, blocks)
  ## return the validated arguments as a named list
  list(Data = Data,
       path_matrix = path_matrix,
       blocks = blocks,
       specs = specs,
       boot.val = boot_args$boot.val,
       br = boot_args$br,
       dataset = dataset)
}
#' @title Check Data
#'
#' @details
#' Internal function. \code{check_data} is called by \code{check_args}.
#'
#' @param Data numeric matrix or data frame containing the manifest variables.
#' @return validated Data
#' @keywords internal
#' @template internals
#' @export
check_data <- function(Data) {
  ## 'Data' must be rectangular: a matrix or a data frame
  if (is_not_tabular(Data))
    stop("\nInvalid 'Data'. Must be a matrix or data frame.")
  ## matrices must be numeric (data frames may mix column types)
  if (is.matrix(Data) && !is.numeric(Data))
    stop("\nInvalid 'Data' matrix. Must be a numeric matrix.")
  ## at least two rows and two columns are required
  if (nrow(Data) == 1)
    stop("\nCannot work with only one row in 'Data'")
  if (ncol(Data) == 1)
    stop("\nCannot work with only one column in 'Data'")
  ## supply default dimnames when they are absent
  if (lacks_rownames(Data))
    rownames(Data) <- seq_len(nrow(Data))
  if (lacks_colnames(Data))
    colnames(Data) <- paste0("MV", seq_len(ncol(Data)))
  Data
}
#' @title Check path matrix
#'
#' @details
#' Internal function. \code{check_path} is called by \code{check_args}.
#'
#' @param path_matrix square (lower triangular) boolean matrix for inner model
#' @return validated path matrix
#' @keywords internal
#' @template internals
#' @export
check_path <- function(path_matrix)
{
  if (is_not_matrix(path_matrix))
    stop("\n'path_matrix' must be a matrix.")
  if (!is_square_matrix(path_matrix))
    stop("\n'path_matrix' must be a square matrix.")
  if (nrow(path_matrix) == 1)
    stop("\n'path_matrix' must have more than one row")
  if (!is_lower_triangular(path_matrix))
    stop("\n'path_matrix' must be a lower triangular matrix")
  ## vectorized check (replaces the original cell-by-cell double loop):
  ## every entry must be 0 or 1; NA entries fail as well, since %in%
  ## never returns NA
  if (!all(path_matrix %in% c(1, 0)))
    stop("\nElements in 'path_matrix' must be '1' or '0'")
  ## supply default LV names when dimnames are missing, and mirror
  ## row names to column names (or vice versa) when only one is set
  if (lacks_dimnames(path_matrix)) {
    LV_names = paste0("LV", seq_len(ncol(path_matrix)))
    dimnames(path_matrix) = list(LV_names, LV_names)
  }
  if (has_rownames(path_matrix) && lacks_colnames(path_matrix)) {
    colnames(path_matrix) = rownames(path_matrix)
  }
  if (has_colnames(path_matrix) && lacks_rownames(path_matrix)) {
    rownames(path_matrix) = colnames(path_matrix)
  }
  # return
  path_matrix
}
#' @title Check well defined blocks
#'
#' @details
#' Internal function. \code{check_blocks} is called by \code{check_args}.
#'
#' @param blocks list defining the blocks of manifest variables. Elements in
#' \code{blocks} must have the same mode: all "numeric" or all "character".
#' @param Data matrix or data frame from where to extract manifest variables.
#' @return validated blocks (output in numeric format)
#' @keywords internal
#' @template internals
#' @export
check_blocks <- function(blocks, Data)
{
  if (!is.list(blocks))
    stop("\n'blocks' must be a list.")
  ## an empty list previously crashed below with the cryptic
  ## "argument is of length zero"; fail with a clear message instead
  if (length(blocks) == 0L)
    stop("\n'blocks' must contain at least one block")
  # no duplicated elements within each block
  mvs_duplicated = unlist(lapply(blocks, duplicated))
  if (any(mvs_duplicated))
    stop("\nWrong 'blocks'. Duplicated variables in a block are not allowed")
  # all elements in blocks must share one mode
  mvs_mode = unique(unlist(lapply(blocks, mode)))
  if (length(mvs_mode) > 1)
    stop("\nAll elements in 'blocks' must have the same mode")
  ## anything other than column indices or column names (e.g. logical
  ## blocks) used to fall through unvalidated; reject it explicitly
  if (!mvs_mode %in% c("numeric", "character"))
    stop("\nElements in 'blocks' must be numeric indices or column names")
  if (mvs_mode == "numeric") {
    # numeric blocks: indices must fall inside the columns of Data
    blocks_in_data = match(unlist(blocks), seq_len(ncol(Data)))
    if (any(is.na(blocks_in_data)))
      stop("\nIndices in 'blocks' outside the number of columns in 'Data'")
  } else {
    # character blocks: translate column names to column positions
    data_names = colnames(Data)
    matched_names = match(unlist(blocks), data_names)
    if (any(is.na(matched_names))) {
      bad_names = unlist(blocks)[is.na(matched_names)]
      ## toString() so that several unmatched names are reported in one
      ## readable message (stop() on a length>1 vector ran them together)
      stop(sprintf("\nUnrecognized name(s) in 'blocks': %s",
                   toString(sQuote(bad_names))))
    }
    blocks = lapply(blocks, match, table = data_names)
  }
  # output: blocks as numeric column positions
  blocks
}
#' @title Check bootstrap options
#'
#' @details
#' Internal function. \code{check_boot} is called by \code{check_args}.
#'
#' @param boot.val logical indicating whether to perform bootstrapping
#' @param br number of bootstrap resamples
#' @return validated bootstrap options
#' @keywords internal
#' @template internals
#' @export
check_boot <- function(boot.val, br)
{
  ## a non-logical 'boot.val' disables bootstrapping altogether
  if (!is.logical(boot.val)) boot.val = FALSE
  if (boot.val) {
    if (is.null(br)) {
      ## no resample count supplied: use the default
      br = 100
    } else {
      ## test the length FIRST so the scalar conditions that follow
      ## never receive a non-scalar value (`||` requires length-one
      ## operands; the original order could error on a vector 'br')
      if (length(br) != 1L || !is_positive_integer(br) || br < 10) {
        warning("Warning: Invalid argument 'br'. Default 'br=100' is used.")
        br = 100
      }
    }
  }
  # return
  list(boot.val = boot.val, br = br)
}
#' @title Check congruence between inner and outer models
#'
#' @details
#' Internal function. \code{check_model} is called by \code{check_args}.
#'
#' @param path_matrix matrix specifying the path connections
#' @param blocks list defining the blocks of manifest variables
#' @return an error if there is something wrong
#' @keywords internal
#' @template internals
#' @export
check_model <- function(path_matrix, blocks) {
  # the inner model must have exactly one row per block of manifest
  # variables in the outer model; stop otherwise
  if (nrow(path_matrix) != length(blocks))
    stop("\nNumber of rows in 'path_matrix' different from length of 'blocks'.")
  TRUE
}
|
# Data Visualization with ggplot
# Clean the Legends
# Video 5.6
# Load the tidyverse libraries (readr/dplyr/ggplot2) plus ggmap for geocoding
library(tidyverse)
library(ggmap)
# Load the college dataset from the course server; coerce the categorical
# columns to factors and loan_default_rate (read as text) to numeric.
# NOTE(review): requires network access to the URL below.
college <- read_csv('http://672258.youcanlearnit.net/college.csv')
college <- college %>%
  mutate(state=as.factor(state), region=as.factor(region),
         highest_degree=as.factor(highest_degree),
         control=as.factor(control), gender=as.factor(gender),
         loan_default_rate=as.numeric(loan_default_rate))
# Load a county-level map of California
california <- map_data(map="county", region="California")
# Restrict the dataset to California schools
college <- college %>%
  filter(state=="CA")
# Create a tibble of city names and locations.
# NOTE(review): geocode() calls an external geocoding service, so this
# needs network access (and, with recent ggmap versions, an API key).
city_names <- c("Los Angeles", "San Diego", "San Jose", "San Francisco", "Fresno", "Sacramento")
locations <- geocode(city_names)
cities <- tibble (name=city_names, lat=locations$lat, lon=locations$lon)
# Create the base plot: county polygons, one point per school (sized by
# undergrad enrollment, colored by public/private control), city labels,
# and a blank theme so only the map is visible
ggplot(california) +
  geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
  coord_map() +
  theme(plot.background=element_blank(),
        panel.background = element_blank(),
        axis.title=element_blank(),
        axis.ticks=element_blank(),
        axis.text=element_blank()) +
  geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
  geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name))
# Rename the legends: legend titles come from the corresponding scales
# (size scale for the point size, discrete color scale for control)
ggplot(california) +
  geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
  coord_map() +
  theme(plot.background=element_blank(),
        panel.background = element_blank(),
        axis.title=element_blank(),
        axis.ticks=element_blank(),
        axis.text=element_blank()) +
  geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
  geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name)) +
  scale_size_continuous(name="Undergraduate Population") +
  scale_color_discrete(name="Institutional Control")
# Remove the grey background box behind each legend key
ggplot(california) +
  geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
  coord_map() +
  theme(plot.background=element_blank(),
        panel.background = element_blank(),
        axis.title=element_blank(),
        axis.ticks=element_blank(),
        axis.text=element_blank()) +
  geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
  geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name)) +
  scale_size_continuous(name="Undergraduate Population") +
  scale_color_discrete(name="Institutional Control") +
  theme(legend.key=element_blank())
| /R/ggplot2 - LinkedIn Learning/5_6/legends_complete.R | no_license | robbyjeffries1/DataSciencePortfolio | R | false | false | 2,693 | r | # Data Visualization with ggplot
# Clean the Legends
# Video 5.6
# Load the tidyverse libraries
library(tidyverse)
library(ggmap)
# Load the college dataset
college <- read_csv('http://672258.youcanlearnit.net/college.csv')
college <- college %>%
mutate(state=as.factor(state), region=as.factor(region),
highest_degree=as.factor(highest_degree),
control=as.factor(control), gender=as.factor(gender),
loan_default_rate=as.numeric(loan_default_rate))
# Load a map of California
california <- map_data(map="county", region="California")
# Restrict the dataset to California schools
college <- college %>%
filter(state=="CA")
# Create a tibble of city names and locations
city_names <- c("Los Angeles", "San Diego", "San Jose", "San Francisco", "Fresno", "Sacramento")
locations <- geocode(city_names)
cities <- tibble (name=city_names, lat=locations$lat, lon=locations$lon)
# Create the plot
ggplot(california) +
geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
coord_map() +
theme(plot.background=element_blank(),
panel.background = element_blank(),
axis.title=element_blank(),
axis.ticks=element_blank(),
axis.text=element_blank()) +
geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name))
# Rename the legends
ggplot(california) +
geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
coord_map() +
theme(plot.background=element_blank(),
panel.background = element_blank(),
axis.title=element_blank(),
axis.ticks=element_blank(),
axis.text=element_blank()) +
geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name)) +
scale_size_continuous(name="Undergraduate Population") +
scale_color_discrete(name="Institutional Control")
# Remove the legend background
ggplot(california) +
geom_polygon(mapping=aes(x=long,y=lat,group=group), color="grey", fill="beige") +
coord_map() +
theme(plot.background=element_blank(),
panel.background = element_blank(),
axis.title=element_blank(),
axis.ticks=element_blank(),
axis.text=element_blank()) +
geom_point(data=college, mapping=aes(x=lon, y=lat, color=control, size=undergrads), alpha=0.6) +
geom_text(data=cities, mapping=aes(x=lon, y=lat, label=name)) +
scale_size_continuous(name="Undergraduate Population") +
scale_color_discrete(name="Institutional Control") +
theme(legend.key=element_blank())
|
### some random shit
# ht, progress, recoder, identical2, all_equal2, search_df, search_hist,
# fapply, try_require, list2file, Restart, helpExtract, Round, round_to,
# updateR, read_clip, read_clip.csv, read_clip.tab, read_clip.fwf, icols,
# fill_df, kinda_sort, sym_sort, rgene, install_temp, nestedMerge, nestedmerge,
# path_extract, fname, file_name, file_ext, rm_ext, mgrepl, mgrep, msub, mgsub,
# flatten, tree, rm_null, cum_reset, cum_na, cumsum_na, cumprod_na, cummax_na,
# cummin_na, cum_mid, vgrep, vgrepl, justify, factors, sample_each, pickcol,
# lunique, rm_nonascii, sparkDT, clc, clear
#
# unexported:
# helpExtract_, mgrep_, msub_, fill_spaces_, render_sparkDT
###
#' \code{head}/\code{tail}
#'
#' \code{\link{rbind}} the \code{\link{head}} and \code{\link{tail}} of an
#' object.
#'
#' @param x an object
#' @param n an integer giving the first and last \code{n / 2} elements if
#' positive or the middle \code{n} elements if negative
#' @param sep separator
#'
#' @examples
#' ht(letters, 6, '...')
#' ht(letters, -6)
#'
#'
#' mt <- cbind(mtcars, n = seq.int(nrow(mtcars)))
#'
#' ## ends
#' ht(mt)
#' ht(mt, sep = '...')
#'
#' ## middle
#' ht(as.matrix(mt), -6)
#' ht(mt, -6)
#' ht(mt, -6, sep = '...')
#'
#' @export
ht <- function(x, n = 6L, sep = NULL) {
  ## half the requested number of elements taken from each end
  half <- abs(n) / 2
  no_dim <- is.null(dim(x))
  ## dimensionless objects are concatenated (names dropped);
  ## matrices / data frames are stacked with rbind
  combine <- if (no_dim) {
    function(...) setNames(c(...), NULL)
  } else {
    rbind
  }
  if (n < 0L) {
    ## negative n: show the middle -- split the object in half and
    ## take the tail of the first part plus the head of the second
    grp <- cut(seq.int(NROW(x)), breaks = 2L, labels = 1:2)
    parts <- if (no_dim) {
      list(x[grp %in% '1'], x[grp %in% '2'])
    } else {
      list(x[grp %in% '1', , drop = FALSE],
           x[grp %in% '2', , drop = FALSE])
    }
    combine(' ' = sep, tail(parts[[1L]], half), head(parts[[2L]], half),
            ' ' = sep)
  } else {
    ## positive n: the usual head + separator + tail
    combine(head(x, half), ' ' = sep, tail(x, half))
  }
}
#' Progress function
#'
#' Displays the percent (or iterations) completed during some loop.
#'
#' @param value numeric; i-th iteration or percent completed (values 0-100)
#' @param max.value numeric; n-th iteration; if missing, will assume percent
#' completion is desired
#' @param textbar logical; if \code{TRUE}, uses text progress bar which will
#' span across the console width; see \code{\link{options}}
#'
#' @examples
#' \dontrun{
#' iterations <- 77
#' ## percent completed:
#' for (ii in 1:iterations) {
#' progress(ii / iterations * 100)
#' Sys.sleep(.01)
#' }
#'
#' ## iterations completed
#' for (ii in 1:iterations) {
#' progress(ii, iterations)
#' Sys.sleep(.01)
#' }
#'
#' ## text progress bar
#' for (ii in 1:iterations) {
#' progress(ii, iterations, textbar = TRUE)
#' Sys.sleep(.01)
#' }
#' }
#'
#' @export
progress <- function(value, max.value, textbar = FALSE) {
  if (!is.numeric(value))
    stop('\'value\' must be numeric')
  ## avoid scientific notation in the printed counts; restored on exit
  oo <- options(scipen = 10)
  on.exit(options(oo))
  ## when max.value is missing, 'value' is interpreted as a percentage
  ## (0-100); otherwise it is the i-th of max.value iterations
  percent <- missing(max.value)
  if (percent) max.value <- 100
  f <- function(...) paste0(..., collapse = '')
  ## a value past the maximum means "erase the previous display"
  erase.only <- value > max.value
  max.value <- as.character(round(max.value))
  l <- nchar(max.value)
  if (textbar) {
    ## bar spans the console width (minus room for the trailing " 100%")
    m <- getOption('width') - 5L
    pct <- as.numeric(value) / as.numeric(max.value)
    r <- trunc(pct * m)
    backspaces <- f(rep('\b', m * 2))
    if (erase.only) {
      ## fix: the original never emitted the backspaces in this branch,
      ## so the finished bar was left on screen instead of being erased
      cat(backspaces)
    } else {
      bar <- f('|', f(rep('=', max(0, r - 1))),
               f(rep(' ', max(0, m - r))), '|')
      cat(backspaces, bar, sprintf(' %s%%', round(pct * 100)), sep = '')
    }
  } else if (percent) {
    ## "Progress: NN%" -- backspace over the previous message first
    backspaces <- f(rep('\b', l + 14L))
    msg <- if (erase.only) '' else sprintf('Progress: %s%%', round(value))
    cat(backspaces, msg, sep = '')
  } else {
    ## "Progress: i of n"
    backspaces <- f(rep('\b', 2 * l + 17L))
    msg <- if (erase.only) '' else sprintf('Progress: %s of %s ', value, max.value)
    cat(backspaces, msg, sep = '')
  }
  ## Windows buffers console output; force it out so the bar animates
  if (.Platform$OS.type == 'windows')
    flush.console()
  cat('\n')
}
#' Recode variables
#'
#' Recodes numeric, character, and factor values in a vector, list, matrix,
#' or data frame.
#'
#' When recoding a factor variable with a new level, \code{recoder}
#' automatically adds the corresponding level to \code{levels(object)} to
#' avoid errors.
#'
#' The function currently recursively replaces \code{pattern[i]} with
#' \code{replacement[i]} in sequential order, so if you intend to swap values,
#' say \code{a} and \code{b}, in an \code{object}, \code{recoder} will instead
#' first replace all occurrences of \code{a} with \code{b} and then all
#' occurrences of \code{b} with \code{a} resulting in the \code{object} with
#' no \code{b} occurrences; see examples. I will (may) fix this eventually.
#'
#' @param object object to recode
#' @param pattern what to replace
#' @param replacement what to replace \code{pattern} with
#' @param ... ignored
#'
#' @return
#' An object with the same length (or dimensions) and class as \code{object}
#' with the recoded variables.
#'
#' @seealso
#' \code{\link{fill_df}}; \code{\link[car]{recode}};
#' \code{\link{combine_levels}}
#'
#' @examples
#' recoder(mtcars$carb, c(1, 2), c('A', 'B'))
#' recoder(mtcars, c(1, 2), c('A', 'B'))
#'
#' mtcars <- within(mtcars, carb1 <- factor(carb))
#' recoder(mtcars$carb1, 1, 999)
#'
#' tmp <- c(list(1:5), list(5), list(NA))
#' recoder(tmp, 5, NA)
#'
#' ## example from note
#' tmp <- 1:10
#' recoder(tmp, c(1, 2), c(2, 1))
#' # [1] 1 1 3 4 5 6 7 8 9 10 ## actual return
#' # [1] 2 1 3 4 5 6 7 8 9 10 ## desired return
#'
#' @export
## Recode values by replacing pattern[i] with replacement[i], applied
## SEQUENTIALLY in order -- so recoder(x, c(a, b), c(b, a)) does not swap
## a and b (documented as a known limitation in the roxygen @details).
recoder <- function(object, pattern, replacement, ...) {
  ## to do:
  # add swapping option
  # add expression option, eg, if object[i, j] > 0, use replacement
  # fix level printing: DONE
  # allow NA for input: DONE
  # need to recode factor and numeric NAs simultaneously?
  # captured call: used only to print the name of 'object' in the
  # "level(s) added" message for factors below
  m <- match.call()
  # data.frame() calls below must not turn character columns into
  # factors; restored on exit
  op <- options(stringsAsFactors = FALSE)
  on.exit(options(op))
  if (is.factor(object)) {
    # levels that the replacement values would newly introduce
    lvl <- setdiff(replacement, levels(object))
    if (length(lvl))
      cat('level(s)', levels(factor(levels = lvl)),
          'added to factor variable', deparse(m$object), '\n')
    # extend the levels up front so assigning a new value cannot
    # silently produce NA
    levels(object) <- c(levels(object), replacement)
    # object <- droplevels(object)
  }
  # recycle a scalar replacement across every pattern
  if (length(replacement) == 1L)
    replacement <- rep(replacement, length(pattern))
  ## helper functions
  # splitter: data frame -> named list of its transposed columns
  # NOTE(review): defined but never used below
  splitter <- function(df) {
    setNames(split(t(df), seq.int(ncol(df))), names(df))
  }
  # switcher: replace occurrences of g with h in f; an NA pattern
  # means "fill the missing values of f with h"
  switcher <- function(f, g, h) {
    if (is.na(g))
      f[is.na(f)] <- h
    else
      f[f == g] <- h
    f
  }
  # superswitcher: apply every (pattern, replacement) pair to x, one
  # after another, accumulating the result in z via <<- (this is what
  # makes the replacement sequential rather than simultaneous)
  superswitcher <- function(x, y, z) {
    DF <- data.frame(y, z, stringsAsFactors = FALSE)
    z <- x
    # character/factor replacements are coerced to character before
    # assignment; numeric replacements are assigned as-is
    if (class(DF[, 2L]) %in% c('character', 'factor')) {
      lapply(seq.int(nrow(DF)), function(i) {
        if (sum(z %in% DF[i, 1]) == 0) {
          # pattern not present: leave the accumulator untouched
          z <<- z
        } else {
          z <<- switcher(z, DF[i, 1L], as.character(DF[i, 2L]))
        }
      })
    } else {
      lapply(seq.int(nrow(DF)), function(i) {
        z <<- switcher(z, DF[i, 1L], DF[i, 2L])
      })
    }
    z
  }
  # treat certain object classes differently
  if (is.vector(object) & !is.list(object)) {
    # plain vectors: recode element by element
    sapply(object, superswitcher, pattern, replacement)
  } else {
    if (is.data.frame(object)) {
      # data frames: recode column by column (unclass avoids dispatch),
      # then restore the original row names
      tmp <- do.call('data.frame',
                     lapply(unclass(object)[seq.int(ncol(object))],
                            superswitcher, pattern, replacement))
      rownames(tmp) <- attr(object, 'row.names')
      return(tmp)
    }
    if (is.matrix(object)) {
      # matrices: recode cell by cell, then rebuild column-major with
      # the original number of rows
      nrow <- nrow(object)
      tmp <- do.call('rbind',
                     lapply(object, superswitcher, pattern, replacement))
      tmp <- matrix(tmp, nrow = nrow, byrow = FALSE)
      return(tmp)
    } else {
      if (is.factor(object))
        # factors: recode as character, then refactor using the
        # (already extended) level set, preserving orderedness
        factor(unlist(lapply(object, superswitcher, pattern, replacement)),
               levels(object), ordered = is.ordered(object))
      else lapply(object, superswitcher, pattern, replacement)
    }
  }
}
#' Test two or more objects for exact equality
#'
#' The safe and reliable way to test two or more objects for being exactly
#' equal; returns \code{TRUE} in this case, \code{FALSE} in every other case.
#'
#' @param ... any \code{R} objects
#' @param num.eq logical indicating if (\code{\link{double}} and
#' \code{\link{complex}} non-\code{\link{NA}}) numbers should be compared
#' using \code{\link{==}} ("equal"), or by bitwise comparison. The latter
#' (non-default) differentiates between -0 and +0.
#' @param single.NA logical indicating if there is conceptually just one
#' numeric \code{NA} and one \code{\link{NaN}}; \code{single.NA = FALSE}
#' differentiates bit patterns.
#' @param attrib.as.set logical indicating if \code{\link{attributes}} of
#' \code{...} should be treated as \emph{unordered} tagged pairlists ("sets");
#' this currently also applies to \code{\link{slot}}s of S4 objects. It may
#' well be too strict to set \code{attrib.as.set = FALSE}.
#' @param ignore.bytecode logical indicating if byte code should be ignored
#' when comparing \code{\link{closure}}s.
#' @param ignore.environment logical indicating if their environments should
#' be ignored when comparing \code{closure}s.
#'
#' @return
#' A single logical value, \code{TRUE} or \code{FALSE}, never \code{NA}
#' and never anything other than a single value.
#'
#' @seealso
#' \code{\link{identical}}; \code{\link{all.equal}} for descriptions of how
#' two objects differ; \code{\link{Comparison}} for operators that generate
#' elementwise comparisons; \code{\link{isTRUE}} is a simple wrapper based
#' on \code{identical}; \code{\link{all_equal2}}
#'
#' @examples
#' identical2(1, 1.)
#' identical2(1, 1., 1L)
#'
#' ## for unusual R objects:
#' identical2(.GlobalEnv, environment(), globalenv(), as.environment(1))
#'
#' identical2(0., 0, -0.) ## not differentiated
#' identical2(0., 0, -0., num.eq = FALSE)
#'
#' identical2(NaN, -NaN)
#' identical2(NaN, -NaN, single.NA = FALSE) ## differ on bit-level
#'
#' ## for functions
#' f <- function(x) x
#' g <- compiler::cmpfun(f)
#' identical2(f, g)
#' identical2(f, g, ignore.bytecode = FALSE)
#'
#' @export
identical2 <- function(..., num.eq = TRUE, single.NA = TRUE,
                       attrib.as.set = TRUE, ignore.bytecode = TRUE,
                       ignore.environment = FALSE) {
  ## Test two or more objects for exact equality; TRUE only if every
  ## adjacent pair is identical() under the given options.
  l <- list(...)
  if (length(l) < 2L)
    stop('must provide at least two objects')
  ## compare adjacent pairs with [[ so argument names supplied in the call
  ## do not leak into the comparison (l[ii] keeps names, which made
  ## identical2(a = 1, b = 1) return FALSE)
  ok <- vapply(seq_len(length(l) - 1L), function(ii)
    identical(l[[ii]], l[[ii + 1L]], num.eq = num.eq, single.NA = single.NA,
              attrib.as.set = attrib.as.set, ignore.bytecode = ignore.bytecode,
              ignore.environment = ignore.environment), logical(1L))
  all(ok)
}
#' Test if two or more objects are (nearly) equal
#'
#' A generalization of \code{\link{all.equal}} that allows more than two
#' objects to be tested for near-equality.
#'
#' @param ... any \code{R} objects
#' @param tolerance numeric >= 0; differences smaller than \code{tolerance}
#' are not reported (default value is close to 1.5e-8)
#' @param scale numeric scalar > 0 (or \code{NULL}), see details in
#' \code{\link{all.equal}}
#' @param check.attributes logical indicating if the \code{\link{attributes}}
#' should be compared
#' @param use.names logical indicating if \code{\link{list}} comparison should
#' report differing components by name (if matching) instead of integer index
#' @param all.names logical passed to \code{\link{ls}} indicating if "hidden"
#' objects should also be considered in the environments
#' @param check.names logical indicating if the \code{\link{names}}\code{(.)}
#' should be compared
#'
#' @return
#' If all \code{...} are nearly equal, \code{TRUE} otherwise returns a list
#' with the objects that failed.
#'
#' @seealso
#' \code{\link{all.equal}}; \code{\link{identical2}}; \code{\link{identical}}
#'
#' @examples
#' all_equal2(pi, 355/113, 22/7)
#' all_equal2(pi, 355/113, 22/7, tolerance = 0.01)
#'
#' all_equal2(cars[1], cars[, 1, drop = FALSE], cars[, -2, drop = TRUE])
#'
#' @export all_equal2
all_equal2 <- function(..., tolerance = .Machine$double.eps ^ 0.5,
                       scale = NULL, check.attributes = TRUE,
                       use.names = TRUE, all.names = TRUE,
                       check.names = TRUE) {
  ## Test two or more objects for near-equality; returns TRUE or the
  ## (unevaluated) expressions of the objects that failed.
  dots <- substitute(...())  # unevaluated calls, used to label failures
  l <- list(...)             # evaluate the dots exactly once
  if (length(l) < 2L)
    stop('must provide at least two objects')
  ## compare each adjacent pair; check.names is now forwarded (it was
  ## declared and documented but previously never passed to all.equal)
  cmp <- lapply(seq_len(length(l) - 1L), function(ii)
    do.call('all.equal', list(
      target = l[[ii]], current = l[[ii + 1L]],
      tolerance = tolerance, check.attributes = check.attributes,
      scale = scale, use.names = use.names, all.names = all.names,
      check.names = check.names))
  )
  ## object i is "equal" if its comparison with object i - 1 passed; the
  ## first object shares the verdict of the first comparison
  trues <- c(TRUE, vapply(cmp, isTRUE, logical(1L)))
  trues[1L] <- trues[2L]
  if (all(trues))
    TRUE else dots[!trues]
}
#' Search function for data frames
#'
#' Searches a data frame column for matches.
#'
#' @param pattern string to find
#' @param data data frame to search
#' @param col.name column name in \code{data} to search
#' @param var variation; maximum distance allowed for a match; see
#' \code{\link{agrep}}
#' @param ignore.case logical; if \code{FALSE}, the pattern matching is
#' \emph{case-sensitive}, and if \code{TRUE}, case is ignored during matching
#' @param ... additional arguments passed to \code{\link{agrep}}
#'
#' @return
#' Subset of the original \code{data} where the \code{pattern} was found in
#' the specified \code{col.name}.
#'
#' @examples
#' dd <- data.frame(islands = names(islands)[1:32], mtcars)
#' search_df(New, dd, islands)
#' search_df(ho, dd, islands, var = 0.2) # too much variation
#' search_df(ho, dd, islands, var = 0)
#' search_df('Axel Hieberg', dd, islands) # misspelled, not enough variation
#' search_df('Axel Hieberg', dd, islands, var = 2)
#' search_df(19, dd, mpg)
#'
#' @export
search_df <- function(pattern, data, col.name, var = 0,
                      ignore.case = TRUE, ...) {
  ## both pattern and col.name may be given unquoted; capture them
  ## unevaluated and deparse to character
  pat <- as.character(substitute(pattern))
  col <- as.character(substitute(col.name))
  ## approximate (fuzzy) match within the chosen column
  hits <- agrep(pat, data[, col], max.distance = var,
                ignore.case = ignore.case, ...)
  data[hits, ]
}
#' Search history
#'
#' Searches \code{.Rhistory} file for pattern matches.
#'
#' @param x numeric or character; if numeric, shows the most recent \code{x}
#'   lines in \code{.Rhistory}; if character, searches for pattern matches;
#'   if missing, the full history is returned
#' @param ... additional arguments passed to \code{\link{grep}}
#'
#' @return
#' A character vector of matching commands (most recent first when \code{x}
#' is numeric), or \code{invisible(NULL)} if no history file is found.
#'
#' @examples
#' search_hist()
#' search_hist(25)
#' search_hist('?')
#' search_hist('?', fixed = TRUE)
#' search_hist('\\?')
#'
#' @export
search_hist <- function(x, ...) {
  ## read the history; a missing/unreadable file becomes NULL -- do NOT
  ## use return() in tryCatch(finally = ): it exits the caller
  ## immediately, which previously made this function always return NULL
  hist <- tryCatch(readLines('.Rhistory'),
                   warning = function(w) NULL,
                   error = function(e) NULL)
  if (is.null(hist)) {
    message('No history found')
    return(invisible(NULL))
  }
  lhist <- length(hist)
  if (missing(x))
    return(hist)
  if (is.numeric(x)) {
    ## most recent x lines, newest first; cap x at the history length so
    ## the index sequence cannot run past the front of the file
    x <- min(as.integer(x[1L]), lhist)
    hist[lhist:(lhist - x + 1L)]
  } else if (is.character(x))
    grep(x, hist, value = TRUE, ...)
}
#' Apply summary functions over list or vector
#'
#' \code{fapply} applies summary function(s) over a vector, list, or data
#' frame, and \code{fapply_by} applies summary function(s) over subsets of
#' a data frame.
#'
#' @param data for \code{fapply}, a vector, list, or data frame to operate on;
#' for \code{fapply_by}, a data frame containing the variables in \code{formula}
#' @param ... summary function(s) such as \code{length(.)} or
#' \code{mean(., na.rm = TRUE)} to apply; names are not required but strongly
#' recommended
#' @param formula a formula such as \code{y ~ x} or \code{cbind(y1, y2) ~ x1 + x2}
#' where the \code{y} variables are numeric data to be split into groups
#' according to the grouping \code{x} variables (usually factors)
#'
#' @examples
#' tmp <- recoder(mtcars, 6, NA)
#' fapply(tmp, mean = mean(.), median = median(., na.rm = TRUE))
#' fapply(mtcars$mpg, mean = mean(.))
#'
#' ## define a new function
#' ci <- function(x) {
#' q <- quantile(x, c(0.025, 0.975), na.rm = TRUE)
#' sprintf('%.0f (%.2f, %.2f)', median(x), q[1], q[2] )
#' }
#' fapply(mtcars, median(.), '95% CI' = ci(.))
#'
#' ## compare:
#' t(fapply(mtcars, min(.), mean(.), max(.), length(.)))
#' summary(mtcars)
#'
#'
#' fapply_by(mpg ~ vs + am, mtcars, mean(.), median(.), length(.))
#' fapply_by(as.matrix(mtcars) ~ vs, mtcars, mean = mean(.))
#'
#' ## one ~ one, one ~ many, many ~ one, and many ~ many
#' fapply_by(disp ~ cyl, mtcars, mean = mean(.))
#' fapply_by(disp ~ cyl + vs, mtcars, mean = mean(.))
#' fapply_by(cbind(disp, wt) ~ cyl, mtcars, mean = mean(.))
#' fapply_by(cbind(disp, wt) ~ cyl + vs, mtcars, mean = mean(.), n = length(.))
#'
#' ## compare
#' aggregate(cbind(disp, wt) ~ cyl + vs, mtcars, function(x)
#' c(mean(x), length(x)))
#'
#' @export
fapply <- function(data, ...) {
  ## capture the summary calls (e.g., mean(.)) unevaluated
  cl <- match.call(expand.dots = FALSE)$`...`
  if (is.null(cl))
    stop('no methods given')
  ## prepend a dummy named element so that names(cl) is guaranteed to be
  ## a character vector below (it would be NULL if no call were named)
  cl <- c(alist(i = NULL), cl)
  ## unnamed calls get their deparsed expression as the result name
  if (any(nn <- !nzchar(names(cl))))
    names(cl)[nn] <- sapply(cl, deparse)[nn]
  if (!is.list(data))
    data <- list(data)
  ## evaluate each call once per element of data: `.` is the mapply
  ## argument, and eval(fn, NULL) falls through to the calling frame,
  ## where `.` is bound to the current element
  res <- lapply(cl[-1L], function(fn)
    mapply(function(.) eval(fn, NULL), data))
  ## one column per summary call (dummy element dropped)
  setNames(data.frame(res, stringsAsFactors = FALSE), names(cl)[-1L])
}
#' @rdname fapply
#' @export
fapply_by <- function(formula, data, ...) {
  ## capture the summary calls (e.g., mean(.)) unevaluated
  cl <- match.call(expand.dots = FALSE)$`...`
  if (is.null(cl))
    stop('no methods given')
  ## prepend a dummy named element so that names(cl) is guaranteed to be
  ## a character vector below (it would be NULL if no call were named)
  cl <- c(alist(i = NULL), cl)
  ## unnamed calls get their deparsed expression as the result name
  if (any(nn <- !nzchar(names(cl))))
    names(cl)[nn] <- sapply(cl, deparse)[nn]
  ## number of grouping (right-hand side) variables
  nt <- length(all.vars(formula[[3L]]))
  ## every aggregate cell holds the list of all summary results; `.` is
  ## the cell's data vector, found via eval's fall-through to the frame
  ## of the anonymous function
  ag <- aggregate(formula, data, function(.)
    lapply(cl, function(fn) eval(fn, NULL)))
  ag <- unclass(ag)
  ## flatten each response variable's per-cell lists into a data frame,
  ## dropping the dummy first column added above
  ll <- lapply(tail(ag, -nt), function(x) {
    x <- data.frame(x, check.names = FALSE)[, -1L, drop = FALSE]
    data.frame(lapply(x, unlist), check.names = FALSE)
  })
  ## useful names if >1 lhs variable
  # if (length(all.vars(formula[[2L]])) > 1L)
  ll <- if (length(ag) > (nt + 1L))
    lapply(seq_along(ll), function(ii) {
      names(ll[[ii]]) <- paste(names(ll)[ii], names(ll[[ii]]), sep = '.')
      ll[[ii]]
    }) else ll[[length(ll)]]
  ## grouping columns followed by the summary columns
  cbind(data.frame(head(ag, nt), check.names = FALSE), ll)
}
#' Quietly try to require a package
#'
#' Quietly require one or more packages, throwing an error that names the
#' packages which could not be loaded.
#'
#' @param package name(s) of package(s) as a bare name or character vector
#'
#' @export
try_require <- function(package) {
  pkg <- substitute(package)
  ## a bare name, e.g. try_require(stats), is deparsed rather than
  ## evaluated; anything else (string or character vector) is used as-is
  ## (the old scalar ifelse() silently dropped all but the first element)
  package <- if (is.name(pkg))
    as.character(pkg) else package
  available <- vapply(package, function(p)
    suppressMessages(suppressWarnings(
      require(p, quietly = TRUE, character.only = TRUE,
              warn.conflicts = FALSE)
    )), logical(1L))
  missing <- package[!available]
  ## report only the packages that actually failed to load (previously
  ## the message listed every requested package)
  if (length(missing) > 0L)
    stop(paste(missing, collapse = ', '), ' package not found.',
         call. = FALSE)
  invisible(available)
}
#' List to file
#'
#' Save a \emph{named} list of data frames or matrices into \code{R} data files
#' \code{.rda}, \code{.csv}, or \code{.txt} files.
#'
#' @param l a list of data frames or matrices
#' @param targetdir target directory (created if doesn't exist)
#' @param sep field separator string; default is none which results in
#' \code{.rda} data files; "\code{,}" creates \code{.csv} files; any other
#' separator will create \code{.dat} files
#' @param ... additional arguments passed to \code{\link{save}} if \code{sep}
#' is not given or to \code{\link{write.table}} if \code{sep} is given
#'
#' @return
#' \code{list2file} will create \code{length(l)} files in the \code{targetdir}.
#'
#' @examples
#' \dontrun{
#' dfl <- setNames(list(mtcars, iris), c('mtcars','iris'))
#'
#' ## .csv files
#' list2file(dfl, '~/desktop/tmp', sep = ',')
#'
#' ## r data files
#' list2file(dfl, '~/desktop/tmp')
#' }
#'
#' @export
list2file <- function(l, targetdir = getwd(), sep, ...) {
  ## Save a named list of data frames/matrices to .rda (no sep), .csv
  ## (sep = ','), or .dat (any other sep) files in targetdir.
  if (!islist(l))
    stop('\'l\' must be a list')
  if (is.null(names(l)) || any(is.na(names(l))) || any(!nzchar(names(l))))
    stop('all elements of \'l\' must be named')
  ## use is.data.frame/is.matrix rather than comparing class() -- in
  ## R >= 4.0, class(matrix(...)) is c('matrix', 'array') which broke a
  ## direct class-name comparison
  ok <- vapply(l, function(x) is.data.frame(x) || is.matrix(x), logical(1L))
  if (!all(ok))
    stop('all elements of \'l\' should be class \'matrix\' or \'data.frame\'')
  if (!file.exists(targetdir)) {
    message(sprintf('creating directory:\n%s', targetdir))
    dir.create(targetdir)
  }
  e <- new.env()
  list2env(l, envir = e)
  ## save(list = x, envir = e) saves the OBJECT named by x -- save(x, ...)
  ## would have saved the character string x itself
  if (missing(sep)) {
    for (x in names(l))
      save(list = x, envir = e,
           file = sprintf('%s/%s.rda', targetdir, x), ...)
  } else {
    for (x in names(l))
      write.table(get(x, envir = e), sep = sep, ...,
                  file = sprintf('%s/%s.%s', targetdir, x,
                                 if (sep == ',') 'csv' else 'dat'))
  }
  message(sprintf('NOTE: %s written to %s', iprint(names(l)), targetdir))
  invisible(NULL)
}
#' Restart \code{R} session
#'
#' Ends current and restarts a clean \code{R} session.
#'
#' @param afterRestartCommand character string of command(s) to be
#' executed after restarting
#'
#' @examples
#' \dontrun{
#' Restart("clear(); cat('Here is a clean session just for you')")
#' }
#'
#' @export
Restart <- function(afterRestartCommand = '') {
  ## the front end (e.g. RStudio) registers its restart hook in the
  ## 'restart' option; invoke it with the post-restart command
  restart_fun <- getOption('restart')
  restart_fun(afterRestartCommand)
}
# Reload <- function(...) {
# ## clean (rstudio) r session packages:
# pkgs <- c(".GlobalEnv", "tools:rstudio", "package:stats", "package:graphics",
# "package:grDevices", "package:utils", "package:datasets",
# "package:methods", "Autoloads", "package:base")
# to_unload <- setdiff(search(), pkgs)
#
# for (pkg in to_unload)
# try(detach(pkg, unload = TRUE, character.only = TRUE), silent = TRUE)
# rm(list = ls(envir = .GlobalEnv), envir = .GlobalEnv)
# cat('\014')
#
# invisible(NULL)
# }
#' Extract \code{R} help files
#'
#' Extracts specified portions of R help files (from \emph{loaded} libraries)
#' for use in Sweave or R-markdown documents.
#'
#' The \code{type} argument accepts:
#'
#' \tabular{llllll}{
#' \tab \code{text} \tab \tab \tab \tab plain text \cr
#' \tab \code{md_code} \tab \tab \tab \tab markdown code chunks; for use with
#' markdown documents when highlighted code is expected \cr
#' \tab \code{md_text} \tab \tab \tab \tab markdown plain text; for use with
#' markdown documents where regular text is expected \cr
#' \tab \code{sw_code} \tab \tab \tab \tab sweave code chunks; for use with
#' Sweave documents where highlighted code is expected \cr
#' \tab \code{sw_text} \tab \tab \tab \tab sweave plain text; for use with
#' Sweave documents where regular text is expected \cr
#' }
#'
#' To see the results in the console:
#'
#' \code{cat(helpExtract(print, type = 'md_text'))}
#'
#' To insert a (highlighted) chunk into a markdown document:
#'
#' \verb{
#' ```{r, results='asis'}
#' cat(helpExtract(print), sep ='\n')
#' ```
#' }
#'
#' To insert a (highlighted) chunk into a Sweave document:
#'
#' \verb{
#' \\Sexpr{knit_child(textConnection(helpExtract(print, type = 's_code')),
#' options = list(tidy = FALSE, eval = FALSE))}
#' }
#'
#' @param FUN a function as name or character string
#' @param show.sections logical; if \code{TRUE}, returns \code{section} options
#' for \code{FUN}
#' @param section section to extract (default is \code{"Usage"})
#' @param type type of character vector you want returned; default is
#' \code{"text"}, see details
#' @param ... additional arguments passed to \code{\link[utils]{help}}
#'
#' @return
#' A character vector to be used in a Sweave or Rmarkdown document.
#'
#' @examples
#' helpExtract(print)
#' cat(helpExtract(print, section = 'ex'), sep = '\n')
#' cat(helpExtract(print, type = 'md_text', section = 'description'))
#'
#' ## selecting multiple sections prints section names
#' cat(helpExtract(print, section = c('references', 'see also')), sep = '\n')
#'
#' @export
helpExtract <- function(FUN, show.sections = FALSE, section = 'Usage',
                        type = c('text','md_code','md_text',
                                 'sw_code','sw_text'), ...) {
  type <- match.arg(type)
  ## accept a function object (deparsed to its name) or a character string
  FUN <- if (is.function(FUN))
    deparse(substitute(FUN)) else as.character(FUN)
  ## plain-text rendering of the Rd help page
  x <- helpExtract_(FUN, ...)
  ## section start lines -- Rd2txt renders section headers in "bold" with
  ## backspace-overstrike sequences ("_\b" before each character)
  B <- grep('^_\b._\b._', x)
  ## strip the overstrike markup everywhere
  x <- gsub('_\b', '', x, fixed = TRUE)
  if (show.sections)
    return(gsub(':','', x[B]))
  ## split the text into chunks, one per section header
  X <- rep_len(0L, length(x))
  X[B] <- 1L
  res <- split(x, cumsum(X))
  ## keep chunks whose header line matches any requested section
  ## (case-insensitive; Vectorize allows section to be a vector)
  res <- res[which(sapply(res, function(x)
    any(Vectorize(grepl)(section, x[1L], ignore.case = TRUE))))]
  # res <- unlist(sapply(res, '[', -(1:2)))
  ## multiple sections keep their header lines; a single section drops
  ## its header and the blank line that follows it
  res <- if (length(section) > 1L)
    unname(unlist(res)) else res[[1L]][-(1:2)]
  ## drop trailing lines until the last remaining line is non-blank
  ## (at least one line -- the chunk's final line -- is always dropped)
  while (TRUE) {
    res <- res[-length(res)]
    if (nzchar(res[length(res)]))
      break
  }
  ## wrap for the requested output format
  switch(type,
         text = res,
         md_code = c('```r', res, '```'),
         sw_code = c('<<>>=', res, '@'),
         md_text = paste(' ', res, collapse = '\n'),
         sw_text = c('\\begin{verbatim}', res, '\\end{verbatim}')
  )
}
helpExtract_ <- function(FUN, ...) {
  ## Render the Rd help page for FUN (a topic name as character string)
  ## as plain text; returns the captured lines invisibly.
  # (helpExtract_('print'))
  stopifnot(is.character(FUN))
  ## tools:::fetchRdDB -- inlined to avoid ::: access to an unexported fn
  fetchRdDB <- function(filebase, key = NULL) {
    fun <- function(db) {
      vals <- db$vals
      vars <- db$vars
      datafile <- db$datafile
      compressed <- db$compressed
      envhook <- db$envhook
      fetch <- function(key)
        lazyLoadDBfetch(vals[key][[1L]], datafile, compressed, envhook)
      if (length(key)) {
        if (!key %in% vars)
          stop(gettextf("No help on %s found in RdDB %s",
                        sQuote(key), sQuote(filebase)), domain = NA)
        fetch(key)
      } else {
        res <- lapply(vars, fetch)
        names(res) <- vars
        res
      }
    }
    res <- lazyLoadDBexec(filebase, fun)
    if (length(key))
      res else invisible(res)
  }
  ## utils:::.getHelpFile -- inlined for the same reason
  getHelpFile <- function(file) {
    path <- dirname(file)
    dirpath <- dirname(path)
    if (!file.exists(dirpath))
      stop(gettextf("invalid %s argument", sQuote("file")), domain = NA)
    pkgname <- basename(dirpath)
    RdDB <- file.path(path, pkgname)
    ## previously this message was a garbled paste() with mismatched
    ## quote fragments; restored to a single clean format string
    if (!file.exists(paste(RdDB, "rdx", sep = ".")))
      stop(gettextf(
        "package %s exists but was not installed under R >= 2.10.0 so help cannot be accessed",
        sQuote(pkgname)), domain = NA)
    fetchRdDB(RdDB, basename(file))
  }
  x <- capture.output(
    tools::Rd2txt(getHelpFile(utils::help(FUN, ...)),
                  options = list(sectionIndent = 0))
  )
  invisible(x)
}
#' Round vector to target sum
#'
#' Rounds a numeric vector constrained to sum to a \code{target} value.
#'
#' @param x numeric values
#' @param target desired sum of \code{x} after rounding
#'
#' @seealso
#' \code{\link{roundr}}; \code{\link{round_to}}
#'
#' @examples
#' pcts <- data.frame(
#' pct1 = c(33.3, 21.5, 45.51),
#' pct2 = c(33.3, 33.3, 33.3)
#' )
#'
#' ## base round
#' colSums(mapply(round, pcts))
#'
#' ## round to target
#' colSums(mapply(Round, pcts, 100))
#'
#' @export
Round <- function(x, target = NULL) {
  ## round everything, and stop if no target was requested or the target
  ## sum is already met
  rounded <- round(x)
  if (is.null(target) || (total <- sum(rounded)) == target)
    return(rounded)
  ## residuals identify the elements whose rounding moved them; nudge the
  ## one that moved farthest in the wrong direction by one unit, then
  ## recurse until the target is reached
  resid <- rounded - x
  movable <- seq_along(x)[resid != 0]
  if (total > target) {
    pick <- movable[which.max(resid[movable])]
    x[pick] <- rounded[pick] - 1
  } else {
    pick <- movable[which.min(resid[movable])]
    x[pick] <- rounded[pick] + 1
  }
  Recall(x, target)
}
#' Round to
#'
#' Round numeric values to the nearest multiple of \code{to}.
#'
#' @param x a numeric vector
#' @param to nearest fraction or integer
#'
#' @seealso
#' \code{\link{roundr}}; \code{\link{Round}}
#'
#' @examples
#' x <- 1:20 / 10
#' round_to(x, 1)
#' round_to(x, 0.5)
#'
#' @export
round_to <- function(x, to = 1) {
  ## the sign of `to` is irrelevant; only the step size matters
  step <- abs(to)
  round(x / step) * step
}
#' Update \code{R}
#'
#' Copies and updates \code{R} libraries from most recent installed version
#' into the current \code{\link{.libPaths}} directory. This assumes that the
#' user has installed a new \code{X.x} version of \code{R} but will not copy
#' any libraries from previous frameworks into the new library.
#'
#' @param update logical; if \code{TRUE}, checks for available packages
#' updates, downloads, and installs
#'
#' @seealso
#' \code{\link{update.packages}}
#'
#' @export
updateR <- function(update = TRUE) {
  ## Copy libraries from the previous X.x framework version into the
  ## current .libPaths()[1L] and optionally update outdated packages.
  path <- file.path(R.home(), '..', '..')
  ## version directories like "3.6", "4.1", "4.10" (dot escaped; the old
  ## pattern '^\\d{1}.\\d{1}$' treated the dot as a wildcard and missed
  ## multi-digit components)
  vers <- sort(list.files(path, pattern = '^\\d+\\.\\d+$'))
  nv <- length(vers)
  if (nv == 0L)
    stop('no R version directories found in ', path)
  cur <- vers[nv]
  ## operate on the primary library only -- .libPaths() may have several
  ## entries, and a vector condition in if() is an error in modern R
  lib <- .libPaths()[1L]
  if (!grepl(cur, lib, fixed = TRUE))
    stop('A more recent version of R was found on your system\n')
  if (nv >= 2L) {
    v_last <- sub(cur, vers[nv - 1L], lib, fixed = TRUE)
    if (file.exists(v_last)) {
      pkg <- setdiff(list.files(v_last), list.files(lib))
      if (length(pkg) > 0L) {
        cat(sprintf('Copying %s package%s to %s\n', length(pkg),
                    if (length(pkg) > 1L) 's' else '', lib))
        file.copy(file.path(v_last, pkg), lib, recursive = TRUE)
      } else cat('No packages to copy\n')
    }
  }
  if (update) {
    ## table(...)['upgrade'] is NA when nothing needs upgrading; guard so
    ## the condition cannot be NA
    up <- table(packageStatus()$inst$Status)['upgrade']
    if (!is.na(up) && up > 0L) {
      cat(sprintf('Updating %s package%s\n', up, if (up > 1L) 's' else ''))
      update.packages(ask = FALSE)
    } else cat('All packages are up-to-date\n')
  }
}
#' Read data from clipboard
#'
#' Reads data (comma-, tab-, or fixed-width separated) data from clipboard and
#' returns as a data frame.
#'
#' @param header logical; indicates if variable names are in first line
#' @param ... additional arguments passed to \code{\link{read.table}}
#'
#' @seealso
#' \code{\link{read.table}}; \code{\link{read.fwf}}
#'
#' @export
read_clip <- function(header = TRUE, ...) {
  ## macOS exposes the clipboard via the pbpaste utility; elsewhere the
  ## 'clipboard' connection is used directly
  src <- if (Sys.info()['sysname'] %ni% 'Darwin')
    'clipboard' else pipe('pbpaste')
  read.table(file = src, header = header, ...)
}
#' @rdname read_clip
#' @param sep separator as a character string
#' @export
read_clip.csv <- function(header = TRUE, sep = ',', ...) {
  ## comma-separated convenience wrapper around read_clip
  read_clip(sep = sep, header = header, ...)
}
#' @rdname read_clip
#' @export
read_clip.tab <- function(header = TRUE, sep = '\t', ...) {
  ## tab-separated convenience wrapper around read_clip
  read_clip(sep = sep, header = header, ...)
}
#' @rdname read_clip
#' @param widths a vector of widths of the fixed-width fields or a list of
#' vectors giving the widths for multiple lines
#' @export
read_clip.fwf <- function(header = TRUE, widths, ...) {
  ## fixed-width clipboard reader; pbpaste on macOS, the 'clipboard'
  ## connection elsewhere
  src <- if (Sys.info()['sysname'] %ni% 'Darwin')
    'clipboard' else pipe('pbpaste')
  read.fwf(file = src, header = header, widths = widths, ...)
}
#' Index columns by pattern
#'
#' Quickly selects and returns columns from a matrix or data frame by
#' \code{\link{grep}}'ing for a desired \code{pattern}.
#'
#' @param x a matrix or data frame
#' @param pattern pattern to match
#' @param keep optional vector of names of other columns to keep
#' @param ... additional parameters passed to \code{\link{grep}}
#'
#' @examples
#' icols(iris, 'Petal')
#' icols(iris, '\\.')
#' icols(mtcars, '^[\\w]{2}$')
#'
#' @export
icols <- function(x, pattern, keep, ...) {
  ## columns matching the (perl) pattern, optionally preceded by any
  ## explicitly kept columns
  matched <- grep(pattern, colnames(x), perl = TRUE, ...)
  kept <- if (missing(keep))
    NULL else which(colnames(x) %in% keep)
  x[, c(kept, matched), drop = FALSE]
}
#' Fill data frame
#'
#' Fills data frame, \code{data}, containing \code{NA} values using a look-up
#' table, \code{key}. \code{ids} and \code{fill} columns must be in both
#' \code{data} and \code{key}. If neither are given, \code{fill_df} will
#' smartly try to guess which columns need to be filled with the values from
#' the look-up table.
#'
#' @param data a data frame to recode
#' @param key a look-up table data frame
#' @param ids columns treated as id variables, as character strings or indices
#' @param fill columns to recode, as character strings or column indices
#' @param values optional vector of values to recode with \code{fill}; if
#' missing (default), \code{fill_df} only looks for \code{NA}s in \code{data};
#' otherwise, all occurrences of \code{values} will be replaced with
#' \code{NA}, and \code{fill_df} will procede normally
#'
#' @return
#' A data frame with \code{NA}s from \code{fill}-columns recoded to match
#' the values from \code{key}.
#'
#' @seealso
#' \code{\link{recoder}}; \code{\link{locf}}
#'
#' @examples
#' dd <- mtcars
#' dd[matrix(sample(c(TRUE, FALSE), 32 * 11, replace = TRUE), 32)] <- NA
#' identical(mtcars, fill_df(dd, mtcars)) ## TRUE
#'
#' ## recode other variables instead of NAs
#' nn <- sum(is.na(dd))
#' dd[is.na(dd)] <- sample(-10:-1, nn, replace = TRUE)
#' identical(mtcars, fill_df(dd, mtcars, values = -1:-10)) ## TRUE
#'
#' f <- function(x, n = 20) sample(x, size = n, replace = TRUE)
#' set.seed(1)
#' key_df <- data.frame(id = c(1,2,1,2), group = c(3,3,4,4),
#' x = c(100, 200, 300, 400), y = I(LETTERS[1:4]))
#' na_df <- data.frame(id = f(1:2), group = f(3:4),
#' x = f(c(0, NA)), y = I(f(c('', NA))), z = 1)
#'
#' ## auto: all cols with no NAs == ids; cols with any NAs = fill
#' fill_df(na_df, key_df)
#'
#' ## select which to be filled and returned
#' fill_df(na_df, key_df, ids = 1:2, fill = 'x')
#' fill_df(na_df, key_df, ids = 1:2, fill = 4)
#'
#' @export
fill_df <- function(data, key, ids, fill, values) {
  nn <- names(data)  # original column order, restored before returning
  ## if given replace "values" with NAs
  if (!missing(values)) {
    idx <- data
    idx[] <- lapply(data, function(x) x %in% values)
    data[as.matrix(idx)] <- NA
  }
  ## get columns names not defined as ids or fill
  ## (%ni% is a package helper, presumably Negate(`%in%`) -- columns not
  ## present in key are set aside and re-attached at the end)
  if (length(whk <- which(nn %ni% names(key)))) {
    whk <- nn[whk]
    keep <- data[, whk, drop = FALSE]
    data[, whk] <- NULL
  } else keep <- NULL
  ## error checks
  nd <- names(data)
  nad <- vapply(data, anyNA, logical(1L))
  ## nothing to fill
  if (all(!nad))
    return(data)
  ## try to guess columns to use for ids/fill: columns without NAs act as
  ## ids, columns with any NAs get filled
  ids <- if (missing(ids)) {
    ids <- nd[which(!nad)]
    message('\'ids\' : ', paste(ids, collapse = ', '), domain = NA)
    ids
  } else if (is.numeric(ids)) nd[ids] else ids
  fill <- if (missing(fill)) {
    fill <- nd[which(nad)]
    message('\'fill\': ', paste(fill, collapse = ', '), domain = NA)
    fill
  } else if (is.numeric(fill))
    nd[fill] else fill
  ## match current data rows with rows in key and fill NAs -- row keys are
  ## built by pasting the id columns; if EVERY column has NAs, rows are
  ## matched by position instead (note: this assumes key has at least
  ## nrow(data) rows in that case -- TODO confirm)
  ok <- all(nad)
  nak <- if (ok)
    seq.int(nrow(data)) else do.call('paste0', c(key[, ids, drop = FALSE]))
  dfk <- if (ok)
    seq.int(nrow(data)) else do.call('paste0', c(data[, ids, drop = FALSE]))
  mm <- match(dfk, nak)
  ## replace NAs in each fill column with the matched key value
  for (col in fill) {
    nnr <- which(is.na(data[, col]))
    data[nnr, col] <- key[mm[nnr], col]
  }
  # data[do.call('order', as.list(data[, c(nnk, nnf)])), ]
  ## restore the set-aside columns and the original column order
  if (!is.null(keep))
    cbind.data.frame(data, keep)[, nn, drop = FALSE]
  else data[, nn, drop = FALSE]
}
#' Kinda sort
#'
#' @description
#' \code{\link{sort}} a vector but not very well.
#'
#' For a vector, \code{x}, \code{n} elements will be randomly selected, and
#' their positions will remain unchanged as all other elements are sorted.
#' Alternatively, a vector of \code{indices} of \code{x} can be given and
#' will remain unsorted.
#'
#' @param x a numeric, complex, character, or logical vector
#' @param n number of elements of x to remain unsorted (the default is
#' approximately 10\% of \code{x}), ignored if \code{indices} is given
#' @param decreasing logical; if \code{FALSE} (default), \code{x} is sorted
#' in increasing order
#' @param indices a vector of indices specifying which elements of \code{x}
#' should \emph{not} be sorted
#' @param index.return logical; if \code{TRUE}, the ordering index vector is
#' returned
#'
#' @return
#' \code{x} sorted approximately \code{(length(x) - n)/length(x)*100} percent.
#'
#' @seealso
#' \code{\link{sort2}}; \code{\link{sym_sort}}
#'
#' @examples
#' set.seed(1)
#' x <- sample(1:10)
#'
#' rbind(
#' unsorted = x,
#' '50% sort' = kinda_sort(x, n = 5),
#' 'fix 2:5' = kinda_sort(x, indices = 2:5)
#' )
#'
#' # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#' # unsorted 3 4 5 7 2 8 9 6 10 1
#' # 50% sort 3 4 5 6 2 8 7 9 10 1
#' # fix 2:5 1 4 5 7 2 3 6 8 9 10
#'
#'
#' ## use index.return = TRUE for indices instead of values
#' set.seed(1)
#' x <- runif(100)
#' o1 <- kinda_sort(x, n = 50, index.return = TRUE)
#'
#' set.seed(1)
#' x <- runif(100)
#' o2 <- kinda_sort(x, n = 50)
#'
#' stopifnot(
#' identical(x[o1], o2)
#' )
#'
#' @export
kinda_sort <- function(x, n, decreasing = FALSE, indices = NULL,
                       index.return = FALSE) {
  l <- length(x)
  ## default: hold ~10% of the elements fixed; n is capped at length(x)
  n <- if (missing(n))
    ceiling(0.1 * l) else if (n > l) l else n
  ## n == 0 means a full (no-op here) sort is not wanted; return as-is
  if ((n <- as.integer(n)[1L]) == 0L)
    return(x)
  ## positions that stay put: user-supplied indices, or a random draw of n
  k <- sort(indices %||% sample(seq.int(l), n))
  ## mask the fixed positions with NA and order the rest around them --
  ## presumably sort2(, index.return = TRUE) returns an ordering that
  ## leaves NA positions in place; confirm against sort2's definition
  s <- replace(x, k, NA)
  o <- sort2(s, decreasing, TRUE)
  if (index.return)
    o else x[o]
}
#' Symmetrical sort
#'
#' Sort a vector symmetrically, i.e., the two most extreme values are put
#' at opposite ends and repeated until the median value(s) is(are) put in
#' the middle of the sorted vector.
#'
#' @param x a numeric, complex, character, or logical vector
#' @param rev logical; if \code{TRUE}, vectors are sorted in reverse
#' @param index.return logical; if \code{TRUE}, the ordering index vector
#' is returned
#'
#' @seealso
#' \code{\link{kinda_sort}}
#'
#' @examples
#' sym_sort(letters)
#' sym_sort(letters, rev = TRUE)
#'
#' x <- runif(50)
#' plot(sym_sort(x))
#' plot(x[sym_sort(x, index.return = TRUE)])
#'
#' plot(sym_sort(x, rev = TRUE))
#' plot(-sym_sort(-x, rev = TRUE))
#'
#' @export
sym_sort <- function(x, rev = FALSE, index.return = FALSE) {
  if (length(x) <= 1L)
    return(x)
  ## remember original positions so the ordering can be returned
  if (index.return)
    names(x) <- seq_along(x)
  ## deal the sorted values alternately into two halves, sort the halves
  ## in opposite directions, and join them so the extremes sit at the ends
  dec <- if (rev)
    c(FALSE, TRUE) else c(TRUE, FALSE)
  srt <- sort(x)
  halves <- split(srt, rep_len(1:2, length(srt)))
  halves <- Map(function(v, d) sort(v, decreasing = d), halves, dec)
  nms <- unlist(lapply(halves, names))
  out <- unlist(halves, use.names = FALSE)
  names(out) <- nms
  if (index.return)
    as.integer(nms)
  else out
}
#' Generate random gene names
#'
#' Generate random character strings from pools of letters and digits.
#'
#' @param n number of gene names to return
#' @param alpha vector of letters to select from
#' @param nalpha range of possible number of \code{alpha} to select
#' @param num numerics to select from
#' @param nnum range of possible number of \code{num} to select
#' @param sep character to separate \code{alpha} and \code{num}
#' @param seed seed; integer or \code{NULL}
#'
#' @examples
#' rgene()
#' rgene(5, alpha = 'ABCD', nalpha = 1, nnum = 5:6)
#' rgene(5, alpha = c('A','T','C','G'), num = '', sep = '')
#'
#' @export
rgene <- function(n = 1L, alpha = LETTERS[1:5], nalpha = 2:5,
                  num = 0:9, nnum = 1:5, sep = '-', seed = NULL) {
  ## collapse pieces into a single string
  collapse <- function(...) paste0(..., collapse = '')
  ## draw a random-length sample (with replacement) from a pool; the
  ## length itself is drawn first, matching the original RNG order
  draw <- function(pool, sizes) sample(pool, sample(sizes, 1), TRUE)
  set.seed(seed)
  replicate(n,
            collapse(collapse(draw(alpha, nalpha)), sep,
                     collapse(draw(num, nnum))))
}
#' Install packages temporarily
#'
#' This function will create a temporary \code{.libPath}, install, and load
#' packages for use in a single \code{R} session. \cr \cr To install a repo
#' from github temporarily, use \code{\link[withr]{with_libpaths}}.
#'
#' @param pkgs character vector of the names of packages whose current
#' versions should be downloaded from the repositories
#' @param lib character vector giving the library directories where to install
#' \code{pkgs}; recycled as needed; if missing (default), a
#' \code{\link{tempdir}} will be created
#' @param ... additional arguments passed to
#' \code{\link[utils]{install.packages}}
#'
#' @examples
#' \dontrun{
#' install_temp(c('devtools', 'testthat'))
#' }
#'
#' @export
install_temp <- function(pkgs, lib, ...) {
  ## default to the session temp directory so the library vanishes when
  ## the R session ends
  if (missing(lib))
    lib <- tempdir()
  ## note: the temporary library stays on .libPaths() for the rest of
  ## this session (a deliberate choice -- see package docs)
  .libPaths(lib)
  utils::install.packages(pkgs = pkgs, lib = lib, ...)
  for (pkg in pkgs)
    require(pkg, character.only = TRUE)
  invisible(NULL)
}
#' Merge nested lists
#'
#' Recursive functions to merge nested lists.
#'
#' \code{nestedmerge} recursively calls itself to merge similarly-structured
#' named \emph{or} unnamed lists. Unnamed lists results in a "horizontal"
#' merge; named lists will be matched based on names. In either case, the
#' matching element (or list(s) of elements(s)) should also have the same
#' structure.
#'
#' \code{nestedMerge} is a convenience wrapper for \code{nestedmerge} in cases
#' where list \code{a} contains elements not in list \code{b}. If using
#' \code{nestedmerge} in this case, only elements of list \code{a} will be
#' merged and returned.
#'
#' @param x,y lists
#'
#' @seealso
#' \code{\link{clist}}; adapted from
#' \url{https://stackoverflow.com/questions/23483421/combine-merge-lists-by-elements-names-list-in-list}
#'
#' @examples
#' ## l1 and l2 have similar structures
#' l1 <- list(a = list(1:2, NULL), b = list(1:3, NULL), c = list(1:5))
#' l2 <- list(a = list(NULL, 0:1), b = list(NULL, 4:6))
#' l3 <- list(a = list(NULL, 0:1), b = list(4:6))
#'
#' nestedMerge(l1, l2)
#'
#' ## "fails" for `b` since `l1$b` and `l3$b` are not structured similarly
#' nestedMerge(l1, l3)
#'
#' l1 <- list(integers = 1:3, letters = letters[1:3],
#' words = c('two','strings'), rand = rnorm(5))
#' l2 <- list(letters = letters[24:26], booleans = c(TRUE, TRUE, FALSE),
#' words = 'another', floating = c(1.2, 2.4),
#' integers = 1:3 * 10)
#'
#' nestedMerge(l1, l2)
#'
#' ## compare to
#' nestedmerge(l1, l2)
#'
#' @export
nestedMerge <- function(x, y) {
  ## with a single list there is nothing to merge
  if (missing(y))
    return(x)
  ## pad x with NULL placeholders for names that occur only in y, so
  ## nestedmerge will visit (and keep) those elements too
  if (islist(x) & islist(y)) {
    only_y <- setdiff(names(y), names(x))
    x <- c(x, setNames(vector('list', length(only_y)), only_y))
  }
  nestedmerge(x, y)
}
#' @rdname nestedMerge
#' @export
nestedmerge <- function(x, y) {
  ## with a single list there is nothing to merge
  if (missing(y))
    return(x)
  if (islist(x) & islist(y)) {
    res <- list()
    if (!is.null(names(x))) {
      ## named lists: match elements of x to y by name; elements with no
      ## (non-NULL) counterpart in y are kept as-is
      for (nn in names(x)) {
        res <- if (nn %in% names(y) && !is.null(y[[nn]]))
          append(res, c(Recall(x[[nn]], y[[nn]]))) else
            append(res, list(x[[nn]]))
        names(res)[length(res)] <- nn
      }
    } else {
      ## unnamed lists: merge positionally ("horizontal" merge)
      for (ii in seq_along(x))
        res <- if (ii <= length(y) && !is.null(y[[ii]]))
          append(res, Recall(x[[ii]], y[[ii]])) else
            append(res, list(x[[ii]]))
    }
    res
  } else list(c(x, y))  ## leaves: concatenate the pair into one element
}
#' Extract parts of file path
#'
#' These functions will extract the directory, file name, and file extension
#' of some common types of files. Additionally, \code{path_extract} will
#' check its results by recreating \code{path} and will give warnings if
#' the results fail to match the input.
#'
#' \code{fname} and \code{path_extract} do the text processing;
#' \code{file_name} and \code{file_ext} are convenience functions that only
#' return the file name or file extension, respectively.
#'
#' @note
#' Known examples where this function fails:
#' \itemize{
#' \item{\code{.tar.gz} }{files with compound file extensions}
#' }
#'
#' @param path file path as character string
#'
#' @seealso \code{\link[rawr]{regcaptures}}; \code{\link{basename}};
#' \code{\link{dirname}}
#'
#' @examples
#' l <- list(
#' '~/desktop/tmp.csv', ## normal file with directory
#' '.dotfile.txt', ## dotfile with extension
#' '.vimrc', ## dotfile with no extension
#' '~/file.', ## file name ending in .
#' '~/DESCRIPTION', ## no extension
#' '~/desktop/tmp/a.filename.tar.gz' ## compound extension fails
#' )
#'
#' setNames(lapply(l, path_extract), l)
#' setNames(lapply(l, fname), l)
#' setNames(lapply(l, file_name), l)
#' setNames(lapply(l, file_ext), l)
#'
#' @export
path_extract <- function(path) {
  ## normalize so dirname/basename behave consistently across platforms
  p <- normalizePath(path, mustWork = FALSE)
  ## columns: dirname, basename, plus filename/extension from fname()
  m <- cbind(dirname = dirname(p), basename = basename(p), fname(p))
  ## rebuild the path from its parts; '.' is only inserted when an
  ## extension was actually found
  mm <- file.path(
    m[, 'dirname'],
    paste(m[, 'filename'], m[, 'extension'],
          sep = ifelse(nzchar(m[, 'extension']), '.', ''))
  )
  ## sanity check: the reconstructed path should match the input
  ## NOTE(review): `!=` and `||` assume a single path -- a length > 1
  ## input would error here in recent R versions; confirm callers pass
  ## scalar paths (the examples only use scalars)
  if (gsub('\\./', '', mm) != p || !nzchar(m[, 'filename']))
    warning('Results could not be validated', domain = NA)
  m
}
#' @rdname path_extract
#' @export
fname <- function(path) {
  ## work on the file name only, ignoring any directory component
  xx <- basename(path)
  ## group 1: file name (also matches dotfiles such as '.vimrc' whole);
  ## group 2: extension after the final '.' -- compound extensions such
  ## as '.tar.gz' are not handled (see @note above)
  pp <- '(^\\.[^ .]+$|[^:\\/]*?[.$]?)(?:\\.([^ :\\/.]*))?$'
  ## regcaptures2 is a package helper returning the capture groups as
  ## a matrix; relabel its columns for downstream use
  `colnames<-`(regcaptures2(xx, pp)[[1L]], c('filename', 'extension'))
}
#' @rdname path_extract
#' @export
file_name <- function(path) {
  ## decompose the path and keep only the file name part
  parts <- path_extract(path)
  parts[, 'filename']
}
#' @rdname path_extract
#' @export
file_ext <- function(path) {
  ## decompose the path and keep only the extension part
  parts <- path_extract(path)
  parts[, 'extension']
}
#' @rdname path_extract
#' @export
rm_ext <- function(path) {
  ## same pattern as fname(): keep group 1 (the name, including whole
  ## dotfiles) and drop the trailing extension, if any
  re <- '(^\\.[^ .]+$|[^:\\/]*?[.$]?)(?:\\.([^ :\\/.]*))?$'
  gsub(re, '\\1', path, perl = TRUE)
}
#' Multiple pattern matching and replacement
#'
#' Perform multiple pattern matching and replacement.
#'
#' @param pattern for substituting, a vector of length two for a single
#' replacement or a \emph{list} of length two vectors for multiple
#' replacements where each vector is \code{c(pattern,replacement)}; or for
#' grepping, a vector of character strings containing regular expressions
#' to be matched in \code{x}
#' @param x a character vector where matches are sought
#' @param ... additional parameters passed onto other methods
#' @param parallel logical; if \code{TRUE}, grepping will be performed in
#' \pkg{\link{parallel}}; also, if \code{pattern} is a vector greater than
#' \code{1e4} elements in length, \code{parallel} defaults to \code{TRUE}
#' @param replacement optional; if given, both \code{pattern} and
#' \code{replacement} should be character vectors of equal length
#' (\code{replacement} will be recycled if needed)
#'
#' @seealso
#' \code{\link[base]{grep}}; \code{\link{vgrep}}
#'
#' @examples
#' ## grepping
#' mgrep(letters[1:5], letters[1:5])
#' mgrepl(letters[1:5], letters[1:5])
#'
#' ## subbing
#' s1 <- 'thiS iS SooD'
#'
#' ## if replacement is given, acts like gsub
#' mgsub(c('hi', 'oo'), c('HI', '00'), s1)
#' mgsub(c('\\bS','$','i'), '_', rep(s1, 3))
#'
#' ## pattern can also be a list of c(pattern, replacement)
#' r1 <- c('hi','HI')
#' r2 <- c(list(r1), list(c('oo', '00')))
#' r3 <- c(r2, list(c('i', '1'), c('\\b(\\w)', '\\U\\1')))
#'
#' mgsub(r1, x = s1, ignore.case = TRUE)
#' mgsub(r2, x = s1)
#' mgsub(r3, x = s1, perl = TRUE)
#'
#' @name mgrep
NULL
mgrep_ <- function(parallel, FUN, vlist, ...) {
  ## shared driver for mgrep/mgrepl: apply FUN (grep or grepl) to x
  ## once per element of pattern, optionally in parallel
  pattern <- vlist$pattern
  x <- vlist$x
  if (parallel) {
    ## requireNamespace() loads but does NOT attach 'parallel', so its
    ## functions must be called with an explicit parallel:: prefix
    ## (previously these calls were unqualified and failed at run time)
    requireNamespace('parallel')
    cl <- parallel::makeCluster(
      nc <- getOption('cl.cores', parallel::detectCores())
    )
    on.exit(parallel::stopCluster(cl))
    parallel::clusterExport(cl = cl, varlist = c('x', 'pattern'),
                            envir = environment())
    parallel::parLapply(cl, seq_along(pattern),
                        function(ii) FUN(pattern = pattern[ii], x = x, ...))
  } else {
    ## sequential version
    lapply(seq_along(pattern), function(ii)
      FUN(pattern = pattern[ii], x = x, ...))
  }
}
#' @rdname mgrep
#' @export
mgrepl <- function(pattern, x, ..., parallel = length(pattern) > 1e4) {
  ## grepl vectorized over pattern; mgrep_ does the actual work
  args <- list(pattern = pattern, x = x)
  mgrep_(parallel = parallel, FUN = base::grepl, ..., vlist = args)
}
#' @rdname mgrep
#' @export
mgrep <- function(pattern, x, ..., parallel = length(pattern) > 1e4) {
  ## grep vectorized over pattern; mgrep_ does the actual work
  args <- list(pattern = pattern, x = x)
  mgrep_(parallel = parallel, FUN = base::grep, ..., vlist = args)
}
msub_ <- function(pattern, replacement, x, ..., FUN) {
  ## capture the extra arguments unevaluated so they can be forwarded
  ## to FUN (sub/gsub) via do.call for every pattern/replacement pair
  dots <- match.call(expand.dots = FALSE)$...
  FUN <- match.fun(FUN)
  ## if replacement is supplied, zip pattern/replacement into a list of
  ## c(pattern, replacement) pairs (replacement recycled to length)
  if (!missing(replacement))
    pattern <- as.list(data.frame(
      t(cbind(I(pattern), I(rep_len(replacement, length(pattern)))))))
  if (!is.list(pattern))
    pattern <- list(pattern)
  ## apply one pair to the accumulated string: l[1] is the pattern,
  ## l[2] the replacement
  sub2 <- function(l, x)
    do.call(FUN, c(list(x = x, pattern = l[1L], replacement = l[2L]), dots))
  ## right fold over the pairs, threading x through each substitution
  ## (so the last pair in the list is applied first)
  Reduce('sub2', pattern, x, right = TRUE)
}
#' @rdname mgrep
#' @export
msub <- function(pattern, replacement, x, ...) {
  ## first-occurrence replacement per pattern; see msub_ for details
  msub_(pattern, replacement, x, ..., FUN = 'sub')
}
#' @rdname mgrep
#' @export
mgsub <- function(pattern, replacement, x, ...) {
  ## global replacement per pattern; see msub_ for details
  msub_(pattern, replacement, x, ..., FUN = 'gsub')
}
#' Flatten lists
#'
#' Flattens lists and nested lists of vectors, matrices, and/or data frames.
#'
#' @param l a list
#'
#' @references
#' \url{https://stackoverflow.com/q/8139677/2994949}
#'
#' @examples
#' l <- list(matrix(1:3), list(1:3, 'foo'), TRUE, 'hi',
#' list(mtcars[1:5, 1:5], list(mtcars[1:5, 1:5])))
#' str(l)
#' str(flatten(l))
#'
#' @export
flatten <- function(l) {
  ## repeatedly lift nested lists up one level until no element is
  ## itself a list (islist is the package's list test)
  repeat {
    nested <- vapply(l, islist, logical(1L))
    if (!any(nested))
      break
    l <- lapply(l, function(el) if (islist(el)) el else list(el))
    l <- unlist(l, recursive = FALSE)
  }
  l
}
#' tree
#'
#' List contents of directories in a tree-like format.
#'
#' @param path file name path as character string
#' @param full.names logical; if \code{TRUE}, the full file path will be
#' returned; otherwise, only the \code{\link{basename}} is returned (default)
#' @param ndirs,nfiles maximum number of directories and files per directory
#' to print
#'
#' @references
#' \url{https://stackoverflow.com/q/14188197/2994949}
#'
#' @examples
#' str(tree(system.file(package = 'rawr'), FALSE))
#'
#' @export
tree <- function(path = '.', full.names = FALSE, ndirs = 5L, nfiles = 5L) {
  ## recursive worker: a file becomes its (base)name, a directory
  ## becomes a named list of its contents (at most n files per level)
  walk <- function(p, full.names, n) {
    n <- as.integer(n)
    if (!file.info(p)$isdir)
      return(if (full.names) p else basename(p))
    contents <- list.files(p, full.names = TRUE, include.dirs = TRUE)
    dirs <- file.info(contents)$isdir
    ## keep every directory but no more than n files
    contents <- contents[dirs | cumsum(!dirs) <= n]
    out <- lapply(contents, walk, full.names, n)
    names(out) <- basename(contents)
    out
  }
  path <- normalizePath(path, mustWork = TRUE)
  ## limit the top level to ndirs entries
  head(walk(path, full.names, nfiles), ndirs)
}
#' Recursive \code{rm} for lists
#'
#' Remove \code{NULL} or \code{list(NULL)} objects recursively from a list.
#'
#' @param l a list
#' @param rm_list logical; if \code{FALSE}, lists with only the \code{NULL}
#' object will not be removed
#'
#' @references
#' \url{https://stackoverflow.com/q/26539441/2994949}
#'
#' @examples
#' str(l <- list(list(NULL),list(1),list('a', NULL)))
#' str(rm_null(l))
#' str(rm_null(l, FALSE))
#'
#' @export
rm_null <- function(l, rm_list = TRUE) {
  ## predicate: drop an element if it is NULL or (when rm_list = TRUE)
  ## a container holding only NULLs; `||` replaces `|` since both
  ## operands are scalar logicals and short-circuiting skips the
  ## vapply for plain NULL input
  isnull <- if (rm_list)
    function(x) is.null(x) || all(vapply(x, is.null, logical(1L)))
  else function(x) is.null(x)
  ## drop offending elements at this level, then recurse into sublists
  x <- Filter(Negate(isnull), l)
  lapply(x, function(x)
    if (is.list(x))
      rm_null(x, rm_list) else x)
}
#' Cumulative functions
#'
#' @description
#' \code{cum_reset} will reset a cumulative function, \code{FUN}, when
#' \code{value} is encountered.
#'
#' \code{*_na} functions offer alternatives to the \pkg{base}
#' \link[=cumsum]{cumulative functions} that can handle \code{NA}s.
#'
#' \code{cum_mid} finds the mid-points between "stacked" numeric values.
#'
#' @param x a vector (or numeric matrix for \code{cum_mid})
#' @param value a value of \code{x} which signals the end of a group and
#' resets \code{FUN}
#' @param FUN function to apply to each group, usually one of
#' \code{\link{cumsum}}, \code{\link{cumprod}}, \code{\link{cummax}}, or
#' \code{\link{cummin}} but can be any function that returns a vector the
#' same length and type as the input (\emph{a la} \code{\link{ave}})
#' @param useNA logical; if \code{TRUE}, indices with \code{NA} will be
#' unchanged; if \code{FALSE}, the previous value is carried forward
#' @param adj for \code{cum_mid}, an adjustment parameter, usually in
#' \code{[0, 1]}, giving the relative position between each value (default
#' is centered, \code{adj = 0.5})
#'
#' @return
#' A vector having the same length as \code{x} with \code{FUN} applied to
#' each group defined by positions of \code{value}.
#'
#' @seealso
#' \code{\link{cumsum}}; \code{\link{ave}}; \code{locf}
#'
#' @examples
#' x <- 1:10
#' cum_reset(x, 5, cummin)
#' cum_reset(x, c(5, 8), cummin)
#'
#' x[x %% 4 == 0] <- 0
#' cum_reset(x, FUN = cumsum)
#' cum_reset(x, FUN = sum)
#'
#' set.seed(1)
#' data.frame(x = x <- rpois(15, 1),
#' y = cum_reset(x, FUN = cumsum),
#' z = cum_reset(x, 0, function(x) ave(x, FUN = sum)))
#'
#'
#' ## x need not be numeric if FUN returns an appropriate type and length
#' cum_reset(letters[1:10], c('d','g'), function(x)
#' letters[as.numeric(factor(x))])
#'
#'
#' ## cum* functions to handle NA values
#' x <- 1:10
#' x[x %% 4 == 0] <- 0
#' na <- ifelse(x == 0, NA, x)
#'
#' cumsum(x)
#' cum_na(x, cumsum)
#'
#' cumsum(na)
#' cum_na(na, cumsum)
#'
#' ## shorthand
#' cumsum_na(na)
#' cumsum_na(na)
#'
#'
#' ## like cum_reset, cum_na's FUN argument can be generalized if FUN
#' ## returns the correct class and length of the input
#' FUN <- function(x) vector(class(x), length(x))
#' cum_na(na, FUN)
#'
#' cumdiff <- function(x) Reduce(`-`, x, accumulate = TRUE)
#' cumdiff(x)
#' cumsum(c(x[1L], -x[-1L]))
#'
#' cumdiff(na)
#' cumsum(c(na[1L], -na[-1L]))
#' cum_na(na, cumdiff)
#'
#'
#' ## "stacked" numeric values, eg, from a barplot
#' set.seed(1)
#' x <- matrix(runif(12), ncol = 3L)
#' bp <- barplot(x, names.arg = paste('adj = ', c(0, 1, 0.5)))
#'
#' for (ii in seq.int(ncol(x))) {
#' xii <- x[, ii, drop = FALSE]
#' text(bp[ii], cum_mid(xii, c(0, 1, 0.5)[ii]), xii, xpd = NA)
#' }
#'
#' @name cumfuns
NULL
#' @rdname cumfuns
#' @export
cum_reset <- function(x, value = 0L, FUN) {
  ## apply FUN within runs of x delimited by occurrences of `value`
  FUN <- match.fun(FUN)
  ## a matching element closes its run; the next run begins after it
  ## (cumsum shifted right by one position)
  grp <- c(0L, head(cumsum(x %in% value), -1L))
  res <- lapply(split(x, grp), FUN)
  unname(unlist(res))
}
#' @rdname cumfuns
#' @export
cum_na <- function(x, FUN, useNA = TRUE) {
  ## apply FUN over the non-missing values only, leaving NAs in place
  FUN <- match.fun(FUN)
  ok <- !is.na(x)
  x[ok] <- FUN(x[ok])
  ## optionally carry the last observation forward over NA positions
  if (useNA)
    x else locf(x)
}
#' @rdname cumfuns
#' @export
cumsum_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumulative sum
  cum_na(x, FUN = cumsum, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cumprod_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumulative product
  cum_na(x, FUN = cumprod, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cummax_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumulative maximum
  cum_na(x, FUN = cummax, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cummin_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumulative minimum
  cum_na(x, FUN = cummin, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cum_mid <- function(x, adj = 0.5) {
  ## adj must lie in [0, 1] (%inside% is a package operator)
  stopifnot(adj %inside% 0:1)
  mat <- as.matrix(x)
  ## cumulative height below each value: zero plus all previous rows
  below <- rbind(0, head(mat, -1L))
  ## note: mat / (1 / adj) is kept verbatim (not mat * adj) so the
  ## floating-point result is bit-identical to earlier versions
  out <- mat / (1 / adj) + apply(below, 2L, cumsum)
  ## return a plain vector when the input had no dim
  if (is.null(dim(x)))
    drop(out) else out
}
#' \code{grep} for vectors
#'
#' \code{grep} vectors for patterns given by other vectors.
#'
#' @param pattern a vector to be matched
#' @param x vector having the same type as \code{pattern} where matches are
#' sought
#'
#' @return
#' For \code{vgrep}, a vector of indices indicating the start of the matches
#' found in \code{x}. For \code{vgrepl}, a list of logical vectors of
#' \code{length(x)} for each match found in \code{x}.
#'
#' @references
#' Adapted from \url{https://stackoverflow.com/q/33027611/2994949}
#'
#' @seealso
#' \code{\link{grep}}; \code{\link[rawr]{mgrep}}; \code{\link[rawr]{\%==\%}}
#'
#' @examples
#' x <- c(0,1,1,0,1,1,NA,1,1,0,1,1,NA,1,0,0,1,
#' 0,1,1,1,NA,1,0,1,NA,1,NA,1,0,1,0,NA,1)
#' vgrep(c(1, NA, 1), x)
#' vgrepl(c(1, NA, 1), x)
#'
#' vgrep(c(1, 0, 1, NA), x)
#' which(vgrepl(c(1, 0, 1, NA), x)[[1]])
#'
#' @export
vgrep <- function(pattern, x) {
  ## recursive helper: acc holds candidate positions in x where the
  ## pattern has matched so far (shifted right by one on each step)
  vgrep_ <- function(pp, xx, acc = if (length(pp))
    seq_along(xx) else integer(0L)) {
    if (!length(pp))
      return(acc)
    ## %==% is the package's NA-safe equality operator; keep only the
    ## candidates whose current element matches pp[[1]], then advance
    Recall(pp[-1L], xx, acc[which(pp[[1L]] %==% xx[acc])] + 1L)
  }
  ## surviving positions point one past the end of each match; shift
  ## back to the start of each match
  vgrep_(pattern, x) - length(pattern)
}
#' @rdname vgrep
#' @export
vgrepl <- function(pattern, x) {
  ## start positions of each match of pattern in x
  m <- vgrep(pattern, x)
  lp <- length(pattern)
  ## all-FALSE template, one element per position of x
  pp <- rep(FALSE, length(x))
  ## NOTE(review): returns integer(0) when nothing matches but a list
  ## of logical vectors otherwise -- callers should only rely on
  ## length() of the result in the no-match case
  if (!length(m))
    integer(0L) else lapply(m, function(y) {
      ## mark the lp positions covered by this match
      pp[y:(y + lp - 1L)] <- TRUE
      pp
    })
}
#' Justify text
#'
#' Add whitespace to (monospaced) text for justified or block-style spacing.
#'
#' @param string a character string
#' @param width desired width of the text in characters, given as a
#'   positive integer
#' @param fill method of adding whitespace, i.e., by starting with the
#' \code{"right"}- or \code{"left"}-most whitespace or \code{"random"}
#'
#' @seealso
#' \code{\link{strwrap}}
#'
#' @references
#' Adapted from \url{https://stackoverflow.com/q/34710597/2994949}
#'
#' @examples
#' x <- paste(rownames(mtcars), collapse = ' ')
#' cat(justify(x))
#'
#' ## slight differences in whitespace for fill methods
#' op <- par(xpd = NA, family = 'mono', cex = 0.8)
#' plot(0, ann = FALSE, axes = FALSE, type = 'n')
#' text(1, 1, justify(x, fill = 'random'))
#' text(1, 0, justify(x, fill = 'right'), col = 2)
#' text(1, -1, justify(x, fill = 'left'), col = 3)
#' par(op)
#'
#' @export
justify <- function(string, width = getOption('width') - 10L,
                    fill = c('random', 'right', 'left')) {
  fill <- match.arg(fill)
  ## double newlines so paragraph breaks survive strwrap
  string <- gsub('\n', '\n\n', string, fixed = TRUE)
  wrapped <- strwrap(string, width = width)
  ## pad each line (except the last) out to `width`, then re-join
  justified <- fill_spaces_(wrapped, width, fill)
  paste(justified, collapse = '\n')
}
fill_spaces_ <- function(lines, width, fill) {
  ## split each wrapped line into words; the final line stays ragged
  tokens <- strsplit(lines, '\\s+')
  res <- lapply(head(tokens, -1L), function(x) {
    nspace <- max(length(x) - 1L, 1L)
    ## total padding needed to bring this line out to `width`
    extra <- width - sum(nchar(x)) - nspace
    reps <- extra %/% nspace
    extra <- extra %% nspace
    ## base gap width between consecutive words
    times <- rep.int(if (reps > 0L) reps + 1L else 1L, nspace)
    ## distribute the remaining `extra` single spaces per `fill`
    if (extra > 0L) {
      if (fill == 'right') {
        times[seq.int(extra)] <- times[seq.int(extra)] + 1L
      } else if (fill == 'left') {
        idx <- (nspace - extra + 1L):nspace
        times[idx] <- times[idx] + 1L
      } else {
        ## previously `inds` was assigned inside the subscript on the
        ## right-hand side, relying on RHS-first evaluation of complex
        ## assignments; do it in two explicit steps instead
        inds <- sample(nspace, extra)
        times[inds] <- times[inds] + 1L
      }
    }
    ## interleave gaps and words: gap[i] precedes word[i]
    spaces <- c('', strrep(' ', times))
    out <- paste(c(rbind(spaces, x)), collapse = '')
    ## very short lines get huge gaps -- collapse them back to single
    ## spaces rather than justify
    if (sum(c(nchar(x), length(x), extra)) < width / 2)
      gsub('\\s{1,}', ' ', out) else out
  })
  ## last line: plain single-space join
  c(res, paste(tail(tokens, 1L)[[1L]], collapse = ' '))
}
#' Find factors
#'
#' Find common factors of two or more integers.
#'
#' @param ... integers
#'
#' @examples
#' factors(21)
#' factors(3 * 2 ^ 20)
#' factors(64, 128, 58)
#'
#' @export
factors <- function(...) {
  ## divisors of a single integer n: every k in 1..|n| with n %% k == 0
  divisors <- function(n) {
    n <- as.integer(n)
    cand <- seq_len(abs(n))
    cand[n %% cand == 0L]
  }
  ## common factors = intersection of each input's divisors
  Reduce(intersect, lapply(list(...), divisors))
}
#' Sample each
#'
#' Returns a logical vector where \code{n} items are randomly sampled from
#' each unique value of a vector, \code{x}.
#'
#' @param x a character, factor, or numeric vector
#' @param n number to sample from each unique group in order; if \code{x} is
#' a factor, \code{n} should correspond to \code{levels(x)}; otherwise,
#' \code{n} will be matched with the sorted unique groups
#'
#' @return
#' A logical vector the same length as \code{x} identifying selected indices.
#'
#' @seealso
#' \code{\link{sample}}; \code{\link{kinda_sort}}
#'
#' @examples
#' x <- mtcars$gear
#'
#' sample_each(x)
#' mtcars[sample_each(x), ]
#'
#' ## compare numeric vs factor vectors (see description above)
#' mtcars[sample_each(x, 3:5), ]
#' X <- factor(x, 5:3)
#' mtcars[sample_each(X, 3:5), ]
#'
#' @export
sample_each <- function(x, n = 1L) {
  ## name each element by its own value so table()/ave() group by it
  x <- setNames(x, x)
  lx <- table(x)
  ## nT: number selected per group, n recycled over the groups in
  ## table(x) order (factor levels, or sorted unique values)
  nT <- setNames(rep_len(n, length(lx)), names(lx))
  ## nF: number NOT selected per group
  nF <- lx - nT
  x <- as.character(x)
  ## per group, shuffle nF zeros and nT ones; ave() writes the shuffled
  ## values back into each group's positions -- because x is character,
  ## the result is coerced to character ('0'/'1')
  idx <- ave(x, x, FUN = function(xx)
    sample(rep(0:1, c(nF[xx[1L]], nT[xx[1L]]))))
  ## coerce '0'/'1' back to logical
  !!as.numeric(idx)
}
#' Pick elements from columns
#'
#' This function will return \code{\link{colnames}} or column values (if
#' \code{value = TRUE}) for "indicator-like" matrices or data frames.
#'
#' @param data a data frame or matrix
#' @param ind if \code{value = FALSE} (default), a vector (usually a single
#' value) to match and return column name(s) of \code{data} where \code{ind}
#' is found; if \code{value = TRUE}, a vector of values to be \emph{ignored}
#' @param value logical; if \code{TRUE}, returns column value(s); otherwise,
#' returns column name(s) (default)
#'
#' @return
#' If \code{value} is \code{FALSE} (default), the column names of \code{data}
#' for which each row of \code{data} contained \code{ind}.
#'
#' If \code{value} is \code{TRUE}, the column values of \code{data} which are
#' \emph{not} values of \code{ind}.
#'
#' @examples
#' set.seed(1)
#' ss <- sample(10)
#' dd <- as.matrix(ftable(1:10, ss))
#'
#' all(pickcol(dd) == ss)
#'
#' rn <- rnorm(10)
#' dd[dd == 1] <- rn
#' all(pickcol(dd, value = TRUE, ind = 0) == rowSums(dd))
#'
#'
#' dd <- data.frame(
#' x = c(1, 0, 0),
#' y = c(0, 0, 1),
#' z = c(0, 1, 0),
#' a = c('one', '', ''),
#' b = c('', '', 'three'),
#' c = c('', 'two', '')
#' )
#'
#' pickcol(dd[1:2])
#' pickcol(dd[1:2], 0)
#' pickcol(dd[1:3] + 1, ind = 2)
#'
#' pickcol(dd[4:6], value = TRUE, ind = '')
#' pickcol(dd, value = TRUE, ind = c('', 0:1))
#'
#' dd[dd == ''] <- NA
#' pickcol(dd[4:6], value = TRUE)
#'
#' @export
pickcol <- function(data, ind = 1L, value = FALSE) {
  ## apply() coerces data to a (possibly character) matrix, so mixed
  ## data frames are compared as strings; each row keeps column names
  out <- apply(data, 1L, function(row) {
    if (!value) {
      ## names of the columns whose value matches ind
      hit <- row %in% ind
      if (sum(hit))
        toString(names(row[hit])) else NA
    } else {
      ## the row's values that are NOT listed in ind (NAs dropped too)
      row[row %in% ind] <- NA
      keep <- row[!is.na(row)]
      if (length(keep) > 1L)
        toString(keep) else keep
    }
  })
  unname(out)
}
#' Number of unique values
#'
#' @param x a vector
#' @param na.rm logical; if \code{TRUE}, \code{NA} will not be counted as a
#' unique level; default is to include
#'
#' @examples
#' x <- c(1:5, NA)
#' lunique(factor(x))
#' lunique(x, TRUE)
#'
#' @export
lunique <- function(x, na.rm = FALSE) {
  ## sort() drops NA by default, removing it from the unique count
  vals <- if (na.rm) sort(x) else x
  length(unique(vals))
}
#' Remove non ASCII characters
#'
#' @param x a character vector
#'
#' @export
rm_nonascii <- function(x) {
  ## strip every byte outside the printable ASCII range (0x20-0x7E)
  ascii_only <- '[^\x20-\x7E]'
  gsub(ascii_only, '', x)
}
#' \code{datatable}s with sparklines
#'
#' Create an HTML table widget using the JavaScript library DataTables
#' (\code{\link[DT]{datatable}}) with \code{\link[sparkline]{sparkline}}
#' columns.
#'
#' @param data a data frame or matrix
#' @param spark a \emph{named} list of lists for each column of \code{data}
#' for which an interactive sparkline will replace each row cell
#'
#' each named list of \code{spark} should have length \code{nrow(data)} and
#' contain at least one numeric value
#' @param type the type of sparkline, one or more of "line", "bar", "box1",
#' or "box2", recycled as needed; the only difference between "box1" and
#' "box2" is the use of \code{spark_range}
#' @param spark_range an optional list or vector (recycled as needed) giving
#' the overall range for each list of \code{spark}; if missing, the ranges
#' will be calculated; note this is only applicable for \code{type = "line"}
#' or \code{type = "box1"}
#' @param options,... \code{options} or additional arguments passed to
#' \code{\link[DT]{datatable}}
#'
#' @seealso
#' Adapted from \url{leonawicz.github.io/HtmlWidgetExamples/ex_dt_sparkline.html}
#'
#' @examples
#' \dontrun{
#' library('DT')
#'
#' ## strings of data separated by commas should be passed to each row
#' ## this data will be used to generate the sparkline
#' dd <- aggregate(cbind(wt, mpg) ~ gear, mtcars, function(x)
#' toString(fivenum(x)))
#' sparkDT(dd)
#'
#'
#' ## for each column, create a list of vectors for each row to be plotted
#' l <- sapply(c('wt', 'mpg'), function(x)
#' split(mtcars[, x], mtcars$gear), simplify = FALSE, USE.NAMES = TRUE)
#'
#' sparkDT(dd, l, type = 'box1')
#'
#'
#' set.seed(1)
#' spark <- replicate(nrow(mtcars), round(rnorm(sample(20:100, 1)), 2), FALSE)
#'
#' sparkDT(mtcars, list(mpg = spark, wt = spark, disp = spark, qsec = spark))
#'
#' sparkDT(mtcars, list(mpg = spark, wt = spark, disp = spark, qsec = spark),
#' spark_range = list(disp = c(-5, 5), mpg = c(0, 10)))
#'
#' ## note difference between box1 (boxes aligned) and box2 (max size)
#' sparkDT(mtcars[, c('mpg', 'wt')],
#' list(mpg = spark, wt = spark),
#' type = c('box1', 'box2'),
#' # range = c(-2, 2),
#' rownames = FALSE,
#' colnames = c('box1', 'box2')
#' )
#' }
#'
#' @export
sparkDT <- function(data, spark, type = c('line', 'bar', 'box1', 'box2'),
                    spark_range, options = list(), ...) {
  data <- as.data.frame(data)
  ## without spark columns this is just a plain datatable
  if (missing(spark))
    return(DT::datatable(data = data, ..., options = options))
  ## observed range of each spark column, used as the default range
  srange <- lapply(spark, function(x) range(unlist(x), na.rm = TRUE))
  ## normalize spark_range into a named list parallel to `spark`:
  ## missing -> observed ranges; plain vector -> recycled to all
  ## columns; named list -> merged over the defaults; unnamed list ->
  ## matched to spark by position
  spark_range <- if (missing(spark_range))
    srange
  else if (!is.list(spark_range))
    setNames(list(spark_range)[rep_len(1L, length(spark))], names(spark))
  else if (length(names(spark_range)))
    modifyList(srange, spark_range)
  else setNames(spark_range, names(spark))
  spark_range <- spark_range[names(spark)]
  type <- match.arg(type, several.ok = TRUE)
  type <- rep_len(type, length(spark))
  ## every spark column must exist in the data
  stopifnot(
    all(names(spark) %in% names(data))
  )
  ## each cell becomes a comma-separated string that the sparkline
  ## javascript library parses client-side
  spark <- rapply(spark, paste, collapse = ', ', how = 'list')
  data[, names(spark)] <- lapply(spark, unlist)
  render_sparkDT(data, names(spark), type, spark_range, options, ...)
}
## build the DT::datatable with per-column sparkline render callbacks;
## called only from sparkDT() which has already normalized its inputs
render_sparkDT <- function(data, variables, type, range, options, ...) {
  ## catch case of rownames = FALSE - first spark col does not render
  dots <- lapply(substitute(alist(...))[-1L], eval)
  if (identical(dots$rownames, FALSE))
    dots$rownames <- rep_len('', nrow(data))
  targets <- match(variables, names(data))
  idx <- seq_along(targets)
  ## each column definition and type need a distinct class with variable name
  columnDefs <- lapply(idx, function(ii)
    list(
      targets = targets[ii],
      render = DT::JS(
        sprintf("function(data, type, full){ return '<span class=spark%s>' + data + '</span>' }",
                variables[ii])
      )
    )
  )
  ## sparkline option strings per chart type; 'line' and 'box1' get an
  ## explicit chart range so columns share a common scale
  type <- lapply(idx, function(ii) {
    bar <- "type: 'bar' , barColor: 'orange', negBarColor: 'purple', highlightColor: 'black'"
    line <- "type: 'line', lineColor: 'black', fillColor: '#cccccc', highlightLineColor: 'orange', highlightSpotColor: 'orange'"
    box <- "type: 'box' , lineColor: 'black', whiskerColor: 'black' , outlierFillColor: 'black' , outlierLineColor: 'black', medianColor: 'black', boxFillColor: 'orange', boxLineColor: 'black'"
    r <- range[[ii]]
    line_range <- sprintf('%s , chartRangeMin: %s , chartRangeMax: %s',
                          line, r[1L], r[2L])
    box_range <- sprintf('%s , chartRangeMin: %s , chartRangeMax: %s',
                         box, r[1L], r[2L])
    types <- list(bar = bar, line = line_range, box1 = box_range, box2 = box)
    types[match(type[ii], names(types))]
  })
  ## draw callback: initialize each spark class once per redraw
  js <- sapply(idx, function(ii) sprintf(
    "$('.spark%s:not(:has(canvas))').sparkline('html', { %s }); \n",
    variables[ii], type[[ii]])
  )
  js <- sprintf(
    'function (oSettings, json) {\n %s }\n', paste(js, collapse = '\n')
  )
  oo <- list(columnDefs = columnDefs, fnDrawCallback = DT::JS(js))
  dt <- do.call(
    DT::datatable,
    c(list(data = data, options = modifyList(options, oo)), dots)
  )
  ## attach the htmlwidgets dependency so the sparkline JS is shipped
  dt$dependencies <-
    c(dt$dependencies, htmlwidgets::getDependency('sparkline'))
  dt
}
#' Clear workspace
#'
#' Clear the workspace by removing all objects in \code{\link{ls}} followed
#' by \code{\link[=gc]{garbage collection}}.
#'
#' @param all.names logical; if \code{TRUE}, also removes hidden (dot) objects
#'
#' @seealso
#' \code{\link{clear}}
#'
#' @export
clc <- function(all.names = FALSE) {
  ## remove every object in the global environment, then force a gc
  targets <- ls(envir = .GlobalEnv, all.names = all.names)
  rm(list = targets, envir = .GlobalEnv)
  gc(TRUE)
  invisible(NULL)
}
#' Clear console
#'
#' Clear the console window.
#'
#' @param ... ignored
#'
#' @seealso
#' \code{\link{clc}}
#'
#' @export
clear <- function(...) {
  ## form feed (\014) clears most R consoles; arguments are ignored
  cat('\014')
}
#' Re-load a package
#'
#' Detach and re-load a package.
#'
#' @param package package name, as a \code{\link{name}} or literal character
#' string
#'
#' @export
reload <- function(package) {
  ## allow the package to be given unquoted, eg, reload(rawr)
  if (!is.character(substitute(package)))
    package <- deparse(substitute(package))
  # tryCatch(
  #   detach(paste0('package:', package), unload = TRUE, character.only = TRUE),
  #   error = function(e) NULL
  # )
  ## unload the namespace then re-attach; on failure the error is
  ## printed and FALSE is returned (invisibly) instead of stopping
  ok <- tryCatch(
    {unloadNamespace(package); library(package, character.only = TRUE); TRUE},
    error = function(e) {print(e); FALSE}
  )
  invisible(ok)
}
| /R/zxx.R | no_license | Huaichao2018/rawr | R | false | false | 69,069 | r | ### some random shit
# ht, progress, recoder, identical2, all_equal2, search_df, search_hist,
# fapply, try_require, list2file, Restart, helpExtract, Round, round_to,
# updateR, read_clip, read_clip.csv, read_clip.tab, read_clip.fwf, icols,
# fill_df, kinda_sort, sym_sort, rgene, install_temp, nestedMerge, nestedmerge,
# path_extract, fname, file_name, file_ext, rm_ext, mgrepl, mgrep, msub, mgsub,
# flatten, tree, rm_null, cum_reset, cum_na, cumsum_na, cumprod_na, cummax_na,
# cummin_na, cum_mid, vgrep, vgrepl, justify, factors, sample_each, pickcol,
# lunique, rm_nonascii, sparkDT, clc, clear
#
# unexported:
# helpExtract_, mgrep_, msub_, fill_spaces_, render_sparkDT
###
#' \code{head}/\code{tail}
#'
#' \code{\link{rbind}} the \code{\link{head}} and \code{\link{tail}} of an
#' object.
#'
#' @param x an object
#' @param n an integer giving the first and last \code{n / 2} elements if
#' positive or the middle \code{n} elements if negative
#' @param sep separator
#'
#' @examples
#' ht(letters, 6, '...')
#' ht(letters, -6)
#'
#'
#' mt <- cbind(mtcars, n = seq.int(nrow(mtcars)))
#'
#' ## ends
#' ht(mt)
#' ht(mt, sep = '...')
#'
#' ## middle
#' ht(as.matrix(mt), -6)
#' ht(mt, -6)
#' ht(mt, -6, sep = '...')
#'
#' @export
ht <- function(x, n = 6L, sep = NULL) {
  half <- abs(n) / 2
  ## vectors are combined with c() (names stripped); anything with
  ## dims is combined with rbind()
  combine <- if (is.null(dim(x)))
    function(...) setNames(c(...), NULL) else match.fun('rbind')
  if (n < 0L) {
    ## negative n: show the middle -- the tail of the first half and
    ## the head of the second half
    grp <- cut(seq.int(NROW(x)), breaks = 2L, labels = 1:2)
    parts <- if (is.null(dim(x)))
      list(x[grp %in% '1'], x[grp %in% '2'])
    else
      list(
        x[grp %in% '1', , drop = FALSE],
        x[grp %in% '2', , drop = FALSE]
      )
    combine(' ' = sep, tail(parts[[1L]], half), head(parts[[2L]], half),
            ' ' = sep)
  } else {
    combine(head(x, half), ' ' = sep, tail(x, half))
  }
}
#' Progress function
#'
#' Displays the percent (or iterations) completed during some loop.
#'
#' @param value numeric; i-th iteration or percent completed (values 0-100)
#' @param max.value numeric; n-th iteration; if missing, will assume percent
#' completion is desired
#' @param textbar logical; if \code{TRUE}, uses text progress bar which will
#' span across the console width; see \code{\link{options}}
#'
#' @examples
#' \dontrun{
#' iterations <- 77
#' ## percent completed:
#' for (ii in 1:iterations) {
#' progress(ii / iterations * 100)
#' Sys.sleep(.01)
#' }
#'
#' ## iterations completed
#' for (ii in 1:iterations) {
#' progress(ii, iterations)
#' Sys.sleep(.01)
#' }
#'
#' ## text progress bar
#' for (ii in 1:iterations) {
#' progress(ii, iterations, textbar = TRUE)
#' Sys.sleep(.01)
#' }
#' }
#'
#' @export
progress <- function(value, max.value, textbar = FALSE) {
  if (!is.numeric(value))
    stop('\'value\' must be numeric')
  ## avoid scientific notation in the printed counts; restored on exit
  oo <- options(scipen = 10)
  on.exit(options(oo))
  ## with no max.value, treat `value` as a percentage (0-100)
  percent <- if (missing(max.value)) {
    max.value <- 100
    TRUE
  } else FALSE
  f <- function(...) paste0(..., collapse = '')
  ## values past the maximum only erase the previous message
  erase.only <- value > max.value
  max.value <- as.character(round(max.value))
  l <- nchar(max.value)
  # value <- formatC(round(value), width = l, format = 'd')
  # max.value <- formatC(max.value, width = l, format = 'd')
  if (textbar) {
    # m <- getOption('width')
    # r <- trunc(as.numeric(value) / as.numeric(max.value) * m)
    # backspaces <- f(rep('\b', m * 2))
    #
    # if (erase.only) message <- ''
    # else {
    #   message <- f('|', f(rep('=', max(0, r - 1))),
    #                f(rep(' ', max(0, m - r))), '|')
    #   cat(backspaces, message, sep = '')
    # }
    ## text bar: overwrite the previous bar with backspaces, then draw
    ## '=' for completed and spaces for remaining, plus a percentage
    m <- getOption('width') - 5L
    pct <- as.numeric(value) / as.numeric(max.value)
    r <- trunc(pct * m)
    backspaces <- f(rep('\b', m * 2))
    message <- if (erase.only)
      '' else {
        message <- f('|', f(rep('=', max(0, r - 1))),
                     f(rep(' ', max(0, m - r))), '|')
        cat(backspaces, message, sprintf(' %s%%', round(pct * 100)), sep = '')
      }
  } else {
    if (percent) {
      ## 'Progress: NN%' style
      backspaces <- f(rep('\b', l + 14L))
      message <- if (erase.only)
        '' else sprintf('Progress: %s%%', round(value))
      cat(backspaces, message, sep = '')
    } else {
      ## 'Progress: i of n' style
      backspaces <- f(rep('\b', 2 * l + 17L))
      message <- if (erase.only)
        '' else sprintf('Progress: %s of %s ', value, max.value)
      cat(backspaces, message, sep = '')
    }
  }
  ## windows consoles buffer output; flush so the update is visible
  if (.Platform$OS.type == 'windows')
    flush.console()
  cat('\n')
}
#' Recode variables
#'
#' Recodes numeric, character, and factor values in a vector, list, matrix,
#' or data frame.
#'
#' When recoding a factor variable with a new level, \code{recoder}
#' automatically adds the corresponding level to \code{levels(object)} to
#' avoid errors.
#'
#' The function currently recursively replaces \code{pattern[i]} with
#' \code{replacement[i]} in sequential order, so if you intend to swap values,
#' say \code{a} and \code{b}, in an \code{object}, \code{recoder} will instead
#' first replace all occurrences of \code{a} with \code{b} and then all
#' occurrences of \code{b} with \code{a} resulting in the \code{object} with
#' no \code{b} occurrences; see examples. I will (may) fix this eventually.
#'
#' @param object object to recode
#' @param pattern what to replace
#' @param replacement what to replace \code{pattern} with
#' @param ... ignored
#'
#' @return
#' An object with the same length (or dimensions) and class as \code{object}
#' with the recoded variables.
#'
#' @seealso
#' \code{\link{fill_df}}; \code{\link[car]{recode}};
#' \code{\link{combine_levels}}
#'
#' @examples
#' recoder(mtcars$carb, c(1, 2), c('A', 'B'))
#' recoder(mtcars, c(1, 2), c('A', 'B'))
#'
#' mtcars <- within(mtcars, carb1 <- factor(carb))
#' recoder(mtcars$carb1, 1, 999)
#'
#' tmp <- c(list(1:5), list(5), list(NA))
#' recoder(tmp, 5, NA)
#'
#' ## example from note
#' tmp <- 1:10
#' recoder(tmp, c(1, 2), c(2, 1))
#' # [1] 1 1 3 4 5 6 7 8 9 10 ## actual return
#' # [1] 2 1 3 4 5 6 7 8 9 10 ## desired return
#'
#' @export
recoder <- function(object, pattern, replacement, ...) {
  ## to do:
  # add swapping option
  # add expression option, eg, if object[i, j] > 0, use replacement
  # fix level printing: DONE
  # allow NA for input: DONE
  # need to recode factor and numeric NAs simultaneously?
  m <- match.call()
  op <- options(stringsAsFactors = FALSE)
  on.exit(options(op))
  if (is.factor(object)) {
    ## any replacement values not already levels are added (and
    ## reported) so assignment below cannot produce NAs
    lvl <- setdiff(replacement, levels(object))
    if (length(lvl))
      cat('level(s)', levels(factor(levels = lvl)),
          'added to factor variable', deparse(m$object), '\n')
    levels(object) <- c(levels(object), replacement)
    # object <- droplevels(object)
  }
  ## single replacement is recycled over all patterns
  if (length(replacement) == 1L)
    replacement <- rep(replacement, length(pattern))
  ## helper functions
  ## NOTE(review): splitter appears unused in this function -- confirm
  ## it can be removed
  splitter <- function(df) {
    setNames(split(t(df), seq.int(ncol(df))), names(df))
  }
  ## replace occurrences of g in f with h; NA patterns replace the
  ## missing values of f
  switcher <- function(f, g, h) {
    if (is.na(g))
      f[is.na(f)] <- h
    else
      f[f == g] <- h
    f
  }
  ## apply every pattern (y) / replacement (z) pair to x in order,
  ## accumulating into z via <<-; character/factor replacements are
  ## coerced to character first
  superswitcher <- function(x, y, z) {
    DF <- data.frame(y, z, stringsAsFactors = FALSE)
    z <- x
    if (class(DF[, 2L]) %in% c('character', 'factor')) {
      lapply(seq.int(nrow(DF)), function(i) {
        if (sum(z %in% DF[i, 1]) == 0) {
          z <<- z
        } else {
          z <<- switcher(z, DF[i, 1L], as.character(DF[i, 2L]))
        }
      })
    } else {
      lapply(seq.int(nrow(DF)), function(i) {
        z <<- switcher(z, DF[i, 1L], DF[i, 2L])
      })
    }
    z
  }
  # treat certain object classes differently
  if (is.vector(object) & !is.list(object)) {
    sapply(object, superswitcher, pattern, replacement)
  } else {
    if (is.data.frame(object)) {
      ## recode column by column, then restore the row names
      tmp <- do.call('data.frame',
                     lapply(unclass(object)[seq.int(ncol(object))],
                            superswitcher, pattern, replacement))
      rownames(tmp) <- attr(object, 'row.names')
      return(tmp)
    }
    if (is.matrix(object)) {
      ## recode element-wise and rebuild with the original dimensions
      nrow <- nrow(object)
      tmp <- do.call('rbind',
                     lapply(object, superswitcher, pattern, replacement))
      tmp <- matrix(tmp, nrow = nrow, byrow = FALSE)
      return(tmp)
    } else {
      ## factors keep their (augmented) levels and orderedness
      if (is.factor(object))
        factor(unlist(lapply(object, superswitcher, pattern, replacement)),
               levels(object), ordered = is.ordered(object))
      else lapply(object, superswitcher, pattern, replacement)
    }
  }
}
#' Test two or more objects for exact equality
#'
#' The safe and reliable way to test two or more objects for being exactly
#' equal; returns \code{TRUE} in this case, \code{FALSE} in every other case.
#'
#' @param ... any \code{R} objects
#' @param num.eq logical indicating if (\code{\link{double}} and
#' \code{\link{complex}} non-\code{\link{NA}}) numbers should be compared
#' using \code{\link{==}} ("equal"), or by bitwise comparison. The latter
#' (non-default) differentiates between -0 and +0.
#' @param single.NA logical indicating if there is conceptually just one
#' numeric \code{NA} and one \code{\link{NaN}}; \code{single.NA = FALSE}
#' differentiates bit patterns.
#' @param attrib.as.set logical indicating if \code{\link{attributes}} of
#' \code{...} should be treated as \emph{unordered} tagged pairlists ("sets");
#' this currently also applies to \code{\link{slot}}s of S4 objects. It may
#' well be too strict to set \code{attrib.as.set = FALSE}.
#' @param ignore.bytecode logical indicating if byte code should be ignored
#' when comparing \code{\link{closure}}s.
#' @param ignore.environment logical indicating if their environments should
#' be ignored when comparing \code{closure}s.
#'
#' @return
#' A single logical value, \code{TRUE} or \code{FALSE}, never \code{NA}
#' and never anything other than a single value.
#'
#' @seealso
#' \code{\link{identical}}; \code{\link{all.equal}} for descriptions of how
#' two objects differ; \code{\link{Comparison}} for operators that generate
#' elementwise comparisons; \code{\link{isTRUE}} is a simple wrapper based
#' on \code{identical}; \code{\link{all_equal2}}
#'
#' @examples
#' identical2(1, 1.)
#' identical2(1, 1., 1L)
#'
#' ## for unusual R objects:
#' identical2(.GlobalEnv, environment(), globalenv(), as.environment(1))
#'
#' identical2(0., 0, -0.) ## not differentiated
#' identical2(0., 0, -0., num.eq = FALSE)
#'
#' identical2(NaN, -NaN)
#' identical2(NaN, -NaN, single.NA = FALSE) ## differ on bit-level
#'
#' ## for functions
#' f <- function(x) x
#' g <- compiler::cmpfun(f)
#' identical2(f, g)
#' identical2(f, g, ignore.bytecode = FALSE)
#'
#' @export
identical2 <- function(..., num.eq = TRUE, single.NA = TRUE,
                       attrib.as.set = TRUE, ignore.bytecode = TRUE,
                       ignore.environment = FALSE) {
  ## all objects are identical iff every adjacent pair is identical;
  ## note the single-bracket `objects[ii]` comparison of length-one
  ## sublists, preserved from the original implementation
  objects <- list(...)
  if (length(objects) < 2L)
    stop('must provide at least two objects')
  pairwise <- vapply(seq_len(length(objects) - 1L), function(ii) {
    identical(objects[ii], objects[ii + 1L], num.eq = num.eq,
              single.NA = single.NA, attrib.as.set = attrib.as.set,
              ignore.bytecode = ignore.bytecode,
              ignore.environment = ignore.environment)
  }, logical(1L))
  all(pairwise)
}
#' Test if two or more objects are (nearly) equal
#'
#' A generalization of \code{\link{all.equal}} that allows more than two
#' objects to be tested for near-equality.
#'
#' @param ... any \code{R} objects
#' @param tolerance numeric >= 0; differences smaller than \code{tolerance}
#' are not reported (default value is close to 1.5e-8)
#' @param scale numeric scalar > 0 (or \code{NULL}), see details in
#' \code{\link{all.equal}}
#' @param check.attributes logical indicating if the \code{\link{attributes}}
#' should be compared
#' @param use.names logical indicating if \code{\link{list}} comparison should
#' report differing components by name (if matching) instead of integer index
#' @param all.names logical passed to \code{\link{ls}} indicating if "hidden"
#' objects should also be considered in the environments
#' @param check.names logical indicating if the \code{\link{names}}\code{(.)}
#' should be compared
#'
#' @return
#' If all \code{...} are nearly equal, \code{TRUE} otherwise returns a list
#' with the objects that failed.
#'
#' @seealso
#' \code{\link{all.equal}}; \code{\link{identical2}}; \code{\link{identical}}
#'
#' @examples
#' all_equal2(pi, 355/113, 22/7)
#' all_equal2(pi, 355/113, 22/7, tolerance = 0.01)
#'
#' all_equal2(cars[1], cars[, 1, drop = FALSE], cars[, -2, drop = TRUE])
#'
#' @export all_equal2
all_equal2 <- function(..., tolerance = .Machine$double.eps ^ 0.5,
                       scale = NULL, check.attributes = TRUE,
                       use.names = TRUE, all.names = TRUE,
                       check.names = TRUE) {
  ## capture the unevaluated calls to label any failing objects
  dots <- substitute(...())
  ## evaluate `...` exactly once (the original called list(...) twice,
  ## re-evaluating side-effecting arguments and discarding the names)
  l <- setNames(list(...), dots)
  if (length(l) < 2L)
    stop('must provide at least two objects')
  ## compare each adjacent pair with all.equal()
  ## NOTE(review): `check.names` is accepted for interface compatibility
  ## but is not forwarded, matching the original behavior
  cmp <- lapply(seq_len(length(l) - 1L), function(x)
    do.call('all.equal', list(
      target = l[[x]], current = l[[x + 1L]],
      tolerance = tolerance, check.attributes = check.attributes,
      scale = scale, use.names = use.names, all.names = all.names))
  )
  ## flag both members of a failing pair: slot 1 mirrors comparison 1
  trues <- c(TRUE, vapply(cmp, isTRUE, logical(1L)))
  trues[1L] <- trues[2L]
  if (all(trues))
    TRUE else dots[!trues]
}
#' Search function for data frames
#'
#' Searches a data frame column for matches.
#'
#' @param pattern string to find
#' @param data data frame to search
#' @param col.name column name in \code{data} to search
#' @param var variation; maximum distance allowed for a match; see
#' \code{\link{agrep}}
#' @param ignore.case logical; if \code{FALSE}, the pattern matching is
#' \emph{case-sensitive}, and if \code{TRUE}, case is ignored during matching
#' @param ... additional arguments passed to \code{\link{agrep}}
#'
#' @return
#' Subset of the original \code{data} where the \code{pattern} was found in
#' the specified \code{col.name}.
#'
#' @examples
#' dd <- data.frame(islands = names(islands)[1:32], mtcars)
#' search_df(New, dd, islands)
#' search_df(ho, dd, islands, var = 0.2) # too much variation
#' search_df(ho, dd, islands, var = 0)
#' search_df('Axel Hieberg', dd, islands) # misspelled, not enough variation
#' search_df('Axel Hieberg', dd, islands, var = 2)
#' search_df(19, dd, mpg)
#'
#' @export
search_df <- function(pattern, data, col.name, var = 0,
                      ignore.case = TRUE, ...) {
  ## both `pattern` and `col.name` may be given unquoted (NSE via substitute)
  pat <- as.character(substitute(pattern))
  col <- as.character(substitute(col.name))
  ## approximate matching: `var` is the maximum distance allowed (see agrep)
  hits <- agrep(pat, data[, col], ignore.case = ignore.case,
                max.distance = var, ...)
  data[hits, ]
}
#' Search history
#'
#' Searches \code{.Rhistory} file for pattern matches.
#'
#' @param x numeric or character; if numeric, shows the most recent \code{n}
#' lines in \code{.Rhistory}; if character, searches for pattern matches
#' @param ... additional arguments passed to \code{\link{grep}}
#'
#' @return
#' A list of recent commands that match \code{pattern}.
#'
#' @examples
#' search_hist()
#' search_hist(25)
#' search_hist('?')
#' search_hist('?', fixed = TRUE)
#' search_hist('\\?')
#'
#' @export
search_hist <- function (x, ...) {
  ## Read the history file once; on failure report and return NULL.
  ## The original used `finally = return(invisible(NULL))`, which runs
  ## unconditionally in this function's frame and therefore *always*
  ## returned NULL before any searching could happen.
  hist <- tryCatch(
    readLines('.Rhistory'),
    error = function(e) NULL,
    warning = function(w) {
      message('No history found')
      NULL
    }
  )
  if (is.null(hist))
    return(invisible(NULL))
  ## with no argument, return the full history
  if (missing(x))
    return(hist)
  lhist <- length(hist)
  if (is.numeric(x))
    ## most recent x lines, newest first; clamp so x > lhist cannot
    ## produce invalid indices
    hist[lhist:max(lhist - x + 1L, 1L)]
  else if (is.character(x))
    grep(x, hist, value = TRUE, ...)
}
#' Apply summary functions over list or vector
#'
#' \code{fapply} applies summary function(s) over a vector, list, or data
#' frame, and \code{fapply_by} applies summary function(s) over subsets of
#' a data frame.
#'
#' @param data for \code{fapply}, a vector, list, or data frame to operate on;
#' for \code{fapply_by}, a data frame containing the variables in \code{formula}
#' @param ... summary function(s) such as \code{length(.)} or
#' \code{mean(., na.rm = TRUE)} to apply; names are not required but strongly
#' recommended
#' @param formula a formula such as \code{y ~ x} or \code{cbind(y1, y2) ~ x1 + x2}
#' where the \code{y} variables are numeric data to be split into groups
#' according to the grouping \code{x} variables (usually factors)
#'
#' @examples
#' tmp <- recoder(mtcars, 6, NA)
#' fapply(tmp, mean = mean(.), median = median(., na.rm = TRUE))
#' fapply(mtcars$mpg, mean = mean(.))
#'
#' ## define a new function
#' ci <- function(x) {
#' q <- quantile(x, c(0.025, 0.975), na.rm = TRUE)
#' sprintf('%.0f (%.2f, %.2f)', median(x), q[1], q[2] )
#' }
#' fapply(mtcars, median(.), '95% CI' = ci(.))
#'
#' ## compare:
#' t(fapply(mtcars, min(.), mean(.), max(.), length(.)))
#' summary(mtcars)
#'
#'
#' fapply_by(mpg ~ vs + am, mtcars, mean(.), median(.), length(.))
#' fapply_by(as.matrix(mtcars) ~ vs, mtcars, mean = mean(.))
#'
#' ## one ~ one, one ~ many, many ~ one, and many ~ many
#' fapply_by(disp ~ cyl, mtcars, mean = mean(.))
#' fapply_by(disp ~ cyl + vs, mtcars, mean = mean(.))
#' fapply_by(cbind(disp, wt) ~ cyl, mtcars, mean = mean(.))
#' fapply_by(cbind(disp, wt) ~ cyl + vs, mtcars, mean = mean(.), n = length(.))
#'
#' ## compare
#' aggregate(cbind(disp, wt) ~ cyl + vs, mtcars, function(x)
#' c(mean(x), length(x)))
#'
#' @export
fapply <- function(data, ...) {
  ## capture the summary calls unevaluated, e.g. mean(.), median(., na.rm = TRUE)
  cl <- match.call(expand.dots = FALSE)$`...`
  if (is.null(cl))
    stop('no methods given')
  ## prepend a named dummy element so names(cl) is never NULL, which keeps
  ## the nzchar() check below valid; the dummy is dropped via cl[-1L]
  cl <- c(alist(i = NULL), cl)
  ## unnamed methods get their deparsed call as the result column name
  if (any(nn <- !nzchar(names(cl))))
    names(cl)[nn] <- sapply(cl, deparse)[nn]
  ## treat a bare vector as a one-column input
  if (!is.list(data))
    data <- list(data)
  ## evaluate each captured call once per column: `.` is bound as the
  ## argument of the inner function, so eval(fn, NULL) resolves `.` to
  ## the current column in that function's frame
  res <- lapply(cl[-1L], function(fn)
    mapply(function(.) eval(fn, NULL), data))
  setNames(data.frame(res, stringsAsFactors = FALSE), names(cl)[-1L])
}
#' @rdname fapply
#' @export
fapply_by <- function(formula, data, ...) {
  ## capture the summary calls unevaluated (same mechanism as fapply)
  cl <- match.call(expand.dots = FALSE)$`...`
  if (is.null(cl))
    stop('no methods given')
  ## named dummy guarantees names(cl) is non-NULL; dropped implicitly since
  ## its eval(NULL) result contributes a NULL list element
  cl <- c(alist(i = NULL), cl)
  if (any(nn <- !nzchar(names(cl))))
    names(cl)[nn] <- sapply(cl, deparse)[nn]
  ## number of grouping (rhs) variables in the formula
  nt <- length(all.vars(formula[[3L]]))
  ## each aggregate cell becomes a list of one result per summary call;
  ## `.` is bound by the anonymous function's argument for eval(fn, NULL)
  ag <- aggregate(formula, data, function(.)
    lapply(cl, function(fn) eval(fn, NULL)))
  ag <- unclass(ag)
  ## flatten the per-cell lists into one data frame per lhs variable;
  ## [-1L] drops the dummy `i` column
  ll <- lapply(tail(ag, -nt), function(x) {
    x <- data.frame(x, check.names = FALSE)[, -1L, drop = FALSE]
    data.frame(lapply(x, unlist), check.names = FALSE)
  })
  ## useful names if >1 lhs variable
  # if (length(all.vars(formula[[2L]])) > 1L)
  ll <- if (length(ag) > (nt + 1L))
    lapply(seq_along(ll), function(ii) {
      names(ll[[ii]]) <- paste(names(ll)[ii], names(ll[[ii]]), sep = '.')
      ll[[ii]]
    }) else ll[[length(ll)]]
  ## grouping columns first, then the summary columns
  cbind(data.frame(head(ag, nt), check.names = FALSE), ll)
}
#' Quietly try to require a package
#'
#' Quietly require a package, returning an error message if not installed.
#'
#' @param package name of package as name or character string
#'
#' @export
try_require <- function(package) {
  ## Accept an unquoted package name, a character string, or a character
  ## vector of names. The original used scalar ifelse() here, which
  ## silently truncated character vectors to their first element.
  sym <- substitute(package)
  package <- tryCatch(package, error = function(e) NULL)
  if (!is.character(package))
    package <- as.character(sym)
  available <- suppressMessages(
    suppressWarnings(
      sapply(package, require, quietly = TRUE,
             character.only = TRUE, warn.conflicts = FALSE)
  ))
  missing <- package[!available]
  ## report only the packages that failed to load (the original message
  ## listed every requested package)
  if (length(missing) > 0L)
    stop(paste(missing, collapse = ', '), ' package not found.')
}
#' List to file
#'
#' Save a \emph{named} list of data frames or matrices into \code{R} data files
#' \code{.rda}, \code{.csv}, or \code{.txt} files.
#'
#' @param l a list of data frames or matrices
#' @param targetdir target directory (created if doesn't exist)
#' @param sep field separator string; default is none which results in
#' \code{.rda} data files; "\code{,}" creates \code{.csv} files; any other
#' separator will create \code{.dat} files
#' @param ... additional arguments passed to \code{\link{save}} if \code{sep}
#' is not given or to \code{\link{write.table}} if \code{sep} is given
#'
#' @return
#' \code{list2file} will create \code{length(l)} files in the \code{targetdir}.
#'
#' @examples
#' \dontrun{
#' dfl <- setNames(list(mtcars, iris), c('mtcars','iris'))
#'
#' ## .csv files
#' list2file(dfl, '~/desktop/tmp', sep = ',')
#'
#' ## r data files
#' list2file(dfl, '~/desktop/tmp')
#' }
#'
#' @export
list2file <- function(l, targetdir = getwd(), sep, ...) {
  if (!islist(l))
    stop('\'l\' must be a list')
  if (is.null(names(l)) || any(is.na(names(l))))
    stop('all elements of \'l\' must be named')
  ## inherits() handles R >= 4.0 where matrices have class c('matrix',
  ## 'array'); the original sapply(l, class) comparison rejected them
  if (!all(vapply(l, inherits, logical(1L), what = c('data.frame', 'matrix'))))
    stop('all elements of \'l\' should be class \'matrix\' or \'data.frame\'')
  if (!file.exists(targetdir)) {
    message(sprintf('creating directory:\n%s', targetdir))
    dir.create(targetdir)
  }
  ## stash the objects in an environment so they can be saved/fetched by name
  e <- new.env()
  list2env(l, envir = e)
  if (missing(sep))
    ## save(list = x, envir = e) saves the *object* named x; the original
    ## save(x, ...) saved the character string x itself
    sapply(names(l), function(x)
      save(list = x, envir = e, file = sprintf('%s/%s.rda', targetdir, x), ...))
  else sapply(names(l), function(x)
    write.table(get(x, envir = e), sep = sep, ...,
                file = sprintf('%s/%s.%s', targetdir, x,
                               ifelse(sep == ',', 'csv', 'dat'))))
  message(sprintf('NOTE: %s written to %s', iprint(names(l)), targetdir))
  invisible(NULL)
}
#' Restart \code{R} session
#'
#' Ends current and restarts a clean \code{R} session.
#'
#' @param afterRestartCommand character string of command(s) to be
#' executed after restarting
#'
#' @examples
#' \dontrun{
#' Restart("clear(); cat('Here is a clean session just for you')")
#' }
#'
#' @export
Restart <- function(afterRestartCommand = '') {
  ## getOption('restart') is expected to hold a function that restarts the
  ## session (set by RStudio-like front ends) -- NOTE(review): outside such
  ## a front end the option is NULL and this call errors; confirm the
  ## intended environment before use
  (getOption('restart'))(afterRestartCommand)
}
# Reload <- function(...) {
# ## clean (rstudio) r session packages:
# pkgs <- c(".GlobalEnv", "tools:rstudio", "package:stats", "package:graphics",
# "package:grDevices", "package:utils", "package:datasets",
# "package:methods", "Autoloads", "package:base")
# to_unload <- setdiff(search(), pkgs)
#
# for (pkg in to_unload)
# try(detach(pkg, unload = TRUE, character.only = TRUE), silent = TRUE)
# rm(list = ls(envir = .GlobalEnv), envir = .GlobalEnv)
# cat('\014')
#
# invisible(NULL)
# }
#' Extract \code{R} help files
#'
#' Extracts specified portions of R help files (from \emph{loaded} libraries)
#' for use in Sweave or R-markdown documents.
#'
#' The \code{type} argument accepts:
#'
#' \tabular{llllll}{
#' \tab \code{text} \tab \tab \tab \tab plain text \cr
#' \tab \code{md_code} \tab \tab \tab \tab markdown code chunks; for use with
#' markdown documents when highlighted code is expected \cr
#' \tab \code{md_text} \tab \tab \tab \tab markdown plain text; for use with
#' markdown documents where regular text is expected \cr
#' \tab \code{sw_code} \tab \tab \tab \tab sweave code chunks; for use with
#' Sweave documents where highlighted code is expected \cr
#' \tab \code{sw_text} \tab \tab \tab \tab sweave plain text; for use with
#' Sweave documents where regular text is expected \cr
#' }
#'
#' To see the results in the console:
#'
#' \code{cat(helpExtract(print, type = 'md_text'))}
#'
#' To insert a (highlighted) chunk into a markdown document:
#'
#' \verb{
#' ```{r, results='asis'}
#' cat(helpExtract(print), sep ='\n')
#' ```
#' }
#'
#' To insert a (highlighted) chunk into a Sweave document:
#'
#' \verb{
#' \\Sexpr{knit_child(textConnection(helpExtract(print, type = 'sw_code')),
#' options = list(tidy = FALSE, eval = FALSE))}
#' }
#'
#' @param FUN a function as name or character string
#' @param show.sections logical; if \code{TRUE}, returns \code{section} options
#' for \code{FUN}
#' @param section section to extract (default is \code{"Usage"})
#' @param type type of character vector you want returned; default is
#' \code{"text"}, see details
#' @param ... additional arguments passed to \code{\link[utils]{help}}
#'
#' @return
#' A character vector to be used in a Sweave or Rmarkdown document.
#'
#' @examples
#' helpExtract(print)
#' cat(helpExtract(print, section = 'ex'), sep = '\n')
#' cat(helpExtract(print, type = 'md_text', section = 'description'))
#'
#' ## selecting multiple sections prints section names
#' cat(helpExtract(print, section = c('references', 'see also')), sep = '\n')
#'
#' @export
helpExtract <- function(FUN, show.sections = FALSE, section = 'Usage',
                        type = c('text','md_code','md_text',
                                 'sw_code','sw_text'), ...) {
  type <- match.arg(type)
  ## accept the function object itself or its name as a character string
  FUN <- if (is.function(FUN))
    deparse(substitute(FUN)) else as.character(FUN)
  ## plain-text rendering of the Rd help page (see helpExtract_)
  x <- helpExtract_(FUN, ...)
  ## section headers in Rd2txt output are overstruck as "_<BS>c" pairs;
  ## use that signature to locate the line where each section starts
  B <- grep('^_\b._\b._', x)
  ## strip the overstrike control sequences from all lines
  x <- gsub('_\b', '', x, fixed = TRUE)
  if (show.sections)
    return(gsub(':','', x[B]))
  ## split the rendered text into chunks, one per section
  X <- rep_len(0L, length(x))
  X[B] <- 1L
  res <- split(x, cumsum(X))
  ## keep chunks whose header line matches any requested section name
  ## (case-insensitive, partial matches allowed via grepl)
  res <- res[which(sapply(res, function(x)
    any(Vectorize(grepl)(section, x[1L], ignore.case = TRUE))))]
  # res <- unlist(sapply(res, '[', -(1:2)))
  ## multiple sections: keep headers so output stays readable;
  ## single section: drop its header and the following blank line
  res <- if (length(section) > 1L)
    unname(unlist(res)) else res[[1L]][-(1:2)]
  ## trim the tail: drop the last line, then keep dropping until the new
  ## last line is non-empty (removes trailing blanks between sections)
  while (TRUE) {
    res <- res[-length(res)]
    if (nzchar(res[length(res)]))
      break
  }
  ## wrap the extracted lines for the requested output format
  switch(type,
         text = res,
         md_code = c('```r', res, '```'),
         sw_code = c('<<>>=', res, '@'),
         md_text = paste(' ', res, collapse = '\n'),
         sw_text = c('\\begin{verbatim}', res, '\\end{verbatim}')
  )
}
helpExtract_ <- function(FUN, ...) {
  # (helpExtract_('print'))
  ## Render the Rd help page for topic `FUN` (a character string) to plain
  ## text and return the lines invisibly; worker for helpExtract().
  stopifnot(is.character(FUN))
  ## tools:::fetchRdDB
  fetchRdDB <- function(filebase, key = NULL) {
    fun <- function(db) {
      vals <- db$vals
      vars <- db$vars
      datafile <- db$datafile
      compressed <- db$compressed
      envhook <- db$envhook
      fetch <- function(key)
        lazyLoadDBfetch(vals[key][[1L]], datafile, compressed, envhook)
      if (length(key)) {
        if (!key %in% vars)
          stop(gettextf("No help on %s found in RdDB %s",
                        sQuote(key), sQuote(filebase)), domain = NA)
        fetch(key)
      } else {
        ## no key: fetch every topic in the database
        res <- lapply(vars, fetch)
        names(res) <- vars
        res
      }
    }
    res <- lazyLoadDBexec(filebase, fun)
    if (length(key))
      res else invisible(res)
  }
  ## utils:::.getHelpFile
  getHelpFile <- function(file) {
    path <- dirname(file)
    dirpath <- dirname(path)
    if (!file.exists(dirpath))
      stop(gettextf("invalid %s argument", sQuote("file")), domain = NA)
    pkgname <- basename(dirpath)
    RdDB <- file.path(path, pkgname)
    if (!file.exists(paste(RdDB, "rdx", sep = ".")))
      ## message repaired: the original had mismatched quotes that left
      ## stray "', '" characters inside the error text
      stop(gettextf(paste("package %s exists but was not installed under R",
                          ">= 2.10.0 so help cannot be accessed"),
                    sQuote(pkgname)), domain = NA)
    fetchRdDB(RdDB, basename(file))
  }
  ## render the Rd object to flush-left plain text and capture the lines
  x <- capture.output(
    tools::Rd2txt(getHelpFile(utils::help(FUN, ...)),
                  options = list(sectionIndent = 0))
  )
  invisible(x)
}
#' Round vector to target sum
#'
#' Rounds a numeric vector constrained to sum to a \code{target} value.
#'
#' @param x numeric values
#' @param target desired sum of \code{x} after rounding
#'
#' @seealso
#' \code{\link{roundr}}; \code{\link{round_to}}
#'
#' @examples
#' pcts <- data.frame(
#' pct1 = c(33.3, 21.5, 45.51),
#' pct2 = c(33.3, 33.3, 33.3)
#' )
#'
#' ## base round
#' colSums(mapply(round, pcts))
#'
#' ## round to target
#' colSums(mapply(Round, pcts, 100))
#'
#' @export
Round <- function(x, target = NULL) {
  ## Iteratively nudge the element with the largest rounding error until
  ## the rounded values sum to `target` (loop form of the original
  ## tail-recursive Recall implementation).
  repeat {
    rounded <- round(x)
    if (is.null(target) || (total <- sum(rounded)) == target)
      return(rounded)
    err <- rounded - x
    ## only elements that were actually rounded can be adjusted
    adjustable <- seq_along(x)[err != 0]
    if (total > target) {
      ## sum too high: decrement the element rounded up the most
      ii <- adjustable[which.max(err[adjustable])]
      x[ii] <- rounded[ii] - 1
    } else {
      ## sum too low: increment the element rounded down the most
      ii <- adjustable[which.min(err[adjustable])]
      x[ii] <- rounded[ii] + 1
    }
  }
}
#' Round to
#'
#' Round numerics to nearest multiple of \code{to}.
#'
#' @param x a numeric vector
#' @param to nearest fraction or integer
#'
#' @seealso
#' \code{\link{roundr}}; \code{\link{Round}}
#'
#' @examples
#' x <- 1:20 / 10
#' round_to(x, 1)
#' round_to(x, 0.5)
#'
#' @export
round_to <- function(x, to = 1) {
  ## round each value to the nearest multiple of |to|
  step <- abs(to)
  step * round(x / step)
}
#' Update \code{R}
#'
#' Copies and updates \code{R} libraries from most recent installed version
#' into the current \code{\link{.libPaths}} directory. This assumes that the
#' user has installed a new \code{X.x} version of \code{R} but will not copy
#' any libraries from previous frameworks into the new library.
#'
#' @param update logical; if \code{TRUE}, checks for available packages
#' updates, downloads, and installs
#'
#' @seealso
#' \code{\link{update.packages}}
#'
#' @export
updateR <- function(update = TRUE) {
  ## two levels above R.home() is assumed to hold one directory per
  ## installed X.x version -- NOTE(review): this matches the macOS
  ## framework layout; confirm before relying on it elsewhere
  path <- file.path(R.home(), '..', '..')
  ## the two most recent installed major.minor versions, e.g. c("3.5", "3.6")
  v <- tail(sort(list.files(path, pattern = '^\\d{1}.\\d{1}$')), 2L)
  ## NOTE(review): grepl() is vectorized over .libPaths(); `if` uses only
  ## the first element (and newer R versions warn/error on length > 1)
  if (!grepl(v[2L], .libPaths()))
    stop('A more recent version of R was found on your system\n')
  ## copy packages present in the previous version's library but missing
  ## from the current one
  if (file.exists(v_last <- sub(v[2L], v[1L], .libPaths()))) {
    pkg <- list.files(.libPaths())
    pkg <- setdiff(list.files(v_last), pkg)
    if (length(pkg) > 0L) {
      cat(sprintf("Copying %s package%s to %s\n", length(pkg),
                  ifelse(length(pkg) > 1L, 's', ''), .libPaths()))
      file.copy(file.path(v_last, pkg), .libPaths(), recursive = TRUE)
    } else cat('No packages to copy\n')
  }
  ## optionally upgrade any out-of-date packages in the current library
  if (update) {
    if ((up <- table(packageStatus()$inst$Status)['upgrade']) > 0L) {
      cat(sprintf('Updating %s package%s\n', up, ifelse(up > 1L, 's', '')))
      update.packages(ask = FALSE)
    } else cat('All packages are up-to-date\n')
  }
}
#' Read data from clipboard
#'
#' Reads data (comma-, tab-, or fixed-width separated) data from clipboard and
#' returns as a data frame.
#'
#' @param header logical; indicates if variable names are in first line
#' @param ... additional arguments passed to \code{\link{read.table}}
#'
#' @seealso
#' \code{\link{read.table}}; \code{\link{read.fwf}}
#'
#' @export
read_clip <- function(header = TRUE, ...) {
  ## macOS has no 'clipboard' connection, so use the pbpaste utility there;
  ## the unused branch is never evaluated (lazy if/else)
  src <- if (Sys.info()['sysname'] %ni% 'Darwin')
    'clipboard' else pipe('pbpaste')
  read.table(file = src, header = header, ...)
}
#' @rdname read_clip
#' @param sep separator as a character string
#' @export
read_clip.csv <- function(header = TRUE, sep = ',', ...) {
  ## comma-separated convenience wrapper (not an S3 method despite the name)
  read_clip(sep = sep, header = header, ...)
}
#' @rdname read_clip
#' @export
read_clip.tab <- function(header = TRUE, sep = '\t', ...) {
  ## tab-separated convenience wrapper (not an S3 method despite the name)
  read_clip(sep = sep, header = header, ...)
}
#' @rdname read_clip
#' @param widths a vector of widths of the fixed-width fields or a list of
#' vectors giving the widths for multiple lines
#' @export
read_clip.fwf <- function(header = TRUE, widths, ...) {
  ## fixed-width convenience reader (not an S3 method despite the name);
  ## macOS has no 'clipboard' connection, so use pbpaste there
  src <- if (Sys.info()['sysname'] %ni% 'Darwin')
    'clipboard' else pipe('pbpaste')
  read.fwf(file = src, header = header, widths = widths, ...)
}
#' Index columns by pattern
#'
#' Quickly selects and returns columns from a matrix or data frame by
#' \code{\link{grep}}'ing for a desired \code{pattern}.
#'
#' @param x a matrix or data frame
#' @param pattern pattern to match
#' @param keep optional vector of names of other columns to keep
#' @param ... additional parameters passed to \code{\link{grep}}
#'
#' @examples
#' icols(iris, 'Petal')
#' icols(iris, '\\.')
#' icols(mtcars, '^[\\w]{2}$')
#'
#' @export
icols <- function(x, pattern, keep, ...) {
  ## columns explicitly kept by name come first, then the pattern matches
  kept <- if (missing(keep))
    NULL else which(colnames(x) %in% keep)
  hits <- grep(pattern, colnames(x), perl = TRUE, ...)
  x[, c(kept, hits), drop = FALSE]
}
#' Fill data frame
#'
#' Fills data frame, \code{data}, containing \code{NA} values using a look-up
#' table, \code{key}. \code{ids} and \code{fill} columns must be in both
#' \code{data} and \code{key}. If neither are given, \code{fill_df} will
#' smartly try to guess which columns need to be filled with the values from
#' the look-up table.
#'
#' @param data a data frame to recode
#' @param key a look-up table data frame
#' @param ids columns treated as id variables, as character strings or indices
#' @param fill columns to recode, as character strings or column indices
#' @param values optional vector of values to recode with \code{fill}; if
#' missing (default), \code{fill_df} only looks for \code{NA}s in \code{data};
#' otherwise, all occurrences of \code{values} will be replaced with
#' \code{NA}, and \code{fill_df} will procede normally
#'
#' @return
#' A data frame with \code{NA}s from \code{fill}-columns recoded to match
#' the values from \code{key}.
#'
#' @seealso
#' \code{\link{recoder}}; \code{\link{locf}}
#'
#' @examples
#' dd <- mtcars
#' dd[matrix(sample(c(TRUE, FALSE), 32 * 11, replace = TRUE), 32)] <- NA
#' identical(mtcars, fill_df(dd, mtcars)) ## TRUE
#'
#' ## recode other variables instead of NAs
#' nn <- sum(is.na(dd))
#' dd[is.na(dd)] <- sample(-10:-1, nn, replace = TRUE)
#' identical(mtcars, fill_df(dd, mtcars, values = -1:-10)) ## TRUE
#'
#' f <- function(x, n = 20) sample(x, size = n, replace = TRUE)
#' set.seed(1)
#' key_df <- data.frame(id = c(1,2,1,2), group = c(3,3,4,4),
#' x = c(100, 200, 300, 400), y = I(LETTERS[1:4]))
#' na_df <- data.frame(id = f(1:2), group = f(3:4),
#' x = f(c(0, NA)), y = I(f(c('', NA))), z = 1)
#'
#' ## auto: all cols with no NAs == ids; cols with any NAs = fill
#' fill_df(na_df, key_df)
#'
#' ## select which to be filled and returned
#' fill_df(na_df, key_df, ids = 1:2, fill = 'x')
#' fill_df(na_df, key_df, ids = 1:2, fill = 4)
#'
#' @export
fill_df <- function(data, key, ids, fill, values) {
  ## remember the original column order for the final return
  nn <- names(data)
  ## if given replace "values" with NAs
  if (!missing(values)) {
    idx <- data
    idx[] <- lapply(data, function(x) x %in% values)
    data[as.matrix(idx)] <- NA
  }
  ## get columns names not defined as ids or fill
  ## (columns absent from the key are set aside and re-attached at the end)
  if (length(whk <- which(nn %ni% names(key)))) {
    whk <- nn[whk]
    keep <- data[, whk, drop = FALSE]
    data[, whk] <- NULL
  } else keep <- NULL
  ## error checks
  nd <- names(data)
  nad <- vapply(data, anyNA, logical(1L))
  ## nothing to fill -- return the (possibly trimmed) data unchanged
  if (all(!nad))
    return(data)
  ## try to guess columns to use for ids/fill
  ## heuristic: NA-free columns act as ids, columns containing NAs are filled
  ids <- if (missing(ids)) {
    ids <- nd[which(!nad)]
    message('\'ids\' : ', paste(ids, collapse = ', '), domain = NA)
    ids
  } else if (is.numeric(ids)) nd[ids] else ids
  fill <- if (missing(fill)) {
    fill <- nd[which(nad)]
    message('\'fill\': ', paste(fill, collapse = ', '), domain = NA)
    fill
  } else if (is.numeric(fill))
    nd[fill] else fill
  ## match current data rows with rows in key and fill NAs
  ## when *every* column has NAs there are no usable id columns, so rows
  ## are matched by position; otherwise rows are keyed by pasted id values
  ok <- all(nad)
  nak <- if (ok)
    seq.int(nrow(data)) else do.call('paste0', c(key[, ids, drop = FALSE]))
  dfk <- if (ok)
    seq.int(nrow(data)) else do.call('paste0', c(data[, ids, drop = FALSE]))
  mm <- match(dfk, nak)
  ## copy values from the matching key row into each NA cell
  for (col in fill) {
    nnr <- which(is.na(data[, col]))
    data[nnr, col] <- key[mm[nnr], col]
  }
  # data[do.call('order', as.list(data[, c(nnk, nnf)])), ]
  ## restore the set-aside columns and the original column order
  if (!is.null(keep))
    cbind.data.frame(data, keep)[, nn, drop = FALSE]
  else data[, nn, drop = FALSE]
}
#' Kinda sort
#'
#' @description
#' \code{\link{sort}} a vector but not very well.
#'
#' For a vector, \code{x}, \code{n} elements will be randomly selected, and
#' their positions will remain unchanged as all other elements are sorted.
#' Alternatively, a vector of \code{indices} of \code{x} can be given and
#' will remain unsorted.
#'
#' @param x a numeric, complex, character, or logical vector
#' @param n number of elements of x to remain unsorted (the default is
#' approximately 10\% of \code{x}), ignored if \code{indices} is given
#' @param decreasing logical; if \code{FALSE} (default), \code{x} is sorted
#' in increasing order
#' @param indices a vector of indices specifying which elements of \code{x}
#' should \emph{not} be sorted
#' @param index.return logical; if \code{TRUE}, the ordering index vector is
#' returned
#'
#' @return
#' \code{x} sorted approximately \code{(length(x) - n)/length(x)*100} percent.
#'
#' @seealso
#' \code{\link{sort2}}; \code{\link{sym_sort}}
#'
#' @examples
#' set.seed(1)
#' x <- sample(1:10)
#'
#' rbind(
#' unsorted = x,
#' '50% sort' = kinda_sort(x, n = 5),
#' 'fix 2:5' = kinda_sort(x, indices = 2:5)
#' )
#'
#' # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#' # unsorted 3 4 5 7 2 8 9 6 10 1
#' # 50% sort 3 4 5 6 2 8 7 9 10 1
#' # fix 2:5 1 4 5 7 2 3 6 8 9 10
#'
#'
#' ## use index.return = TRUE for indices instead of values
#' set.seed(1)
#' x <- runif(100)
#' o1 <- kinda_sort(x, n = 50, index.return = TRUE)
#'
#' set.seed(1)
#' x <- runif(100)
#' o2 <- kinda_sort(x, n = 50)
#'
#' stopifnot(
#' identical(x[o1], o2)
#' )
#'
#' @export
kinda_sort <- function(x, n, decreasing = FALSE, indices = NULL,
                       index.return = FALSE) {
  l <- length(x)
  ## default: leave ~10% of positions unsorted; cap n at length(x)
  n <- if (missing(n))
    ceiling(0.1 * l) else if (n > l) l else n
  ## n == 0 means everything stays fixed -- nothing to do
  ## (note: this early return also ignores `indices` when n is 0)
  if ((n <- as.integer(n)[1L]) == 0L)
    return(x)
  ## positions that stay fixed: user-supplied indices, else a random sample
  k <- sort(indices %||% sample(seq.int(l), n))
  ## mask the fixed positions with NA so sorting leaves them in place --
  ## NOTE(review): assumes sort2(., ., TRUE) returns an order vector that
  ## keeps NA positions where they are; confirm against sort2's definition
  s <- replace(x, k, NA)
  o <- sort2(s, decreasing, TRUE)
  if (index.return)
    o else x[o]
}
#' Symmetrical sort
#'
#' Sort a vector symmetrically, i.e., the two most extreme values are put
#' at opposite ends and repeated until the median value(s) is(are) put in
#' the middle of the sorted vector.
#'
#' @param x a numeric, complex, character, or logical vector
#' @param rev logical; if \code{TRUE}, vectors are sorted in reverse
#' @param index.return logical; if \code{TRUE}, the ordering index vector
#' is returned
#'
#' @seealso
#' \code{\link{kinda_sort}}
#'
#' @examples
#' sym_sort(letters)
#' sym_sort(letters, rev = TRUE)
#'
#' x <- runif(50)
#' plot(sym_sort(x))
#' plot(x[sym_sort(x, index.return = TRUE)])
#'
#' plot(sym_sort(x, rev = TRUE))
#' plot(-sym_sort(-x, rev = TRUE))
#'
#' @export
sym_sort <- function(x, rev = FALSE, index.return = FALSE) {
  ## length-0/1 vectors are already symmetric
  if (length(x) <= 1L)
    return(x)
  ## track original positions via names when indices are requested
  if (index.return)
    names(x) <- seq_along(x)
  ## direction flags for the two interleaved halves
  dirs <- if (rev)
    0:1 else 1:0
  srt <- sort(x)
  half <- rep_len(1:2, length(srt))
  parts <- split(srt, half)
  ## one half descends while the other ascends (swapped when rev = TRUE),
  ## so extremes end up at opposite ends and medians in the middle
  parts <- Map(function(p, d) sort(p, decreasing = d),
               parts, as.logical(dirs))
  idx <- unlist(sapply(parts, names))
  out <- unlist(c(parts))
  names(out) <- idx
  if (index.return)
    as.integer(idx)
  else out
}
#' Generate random gene names
#'
#' Generate random character strings from pools of letters and digits.
#'
#' @param n number of gene names to return
#' @param alpha vector of letters to select from
#' @param nalpha range of possible number of \code{alpha} to select
#' @param num numerics to select from
#' @param nnum range of possible number of \code{num} to select
#' @param sep character to separate \code{alpha} and \code{num}
#' @param seed seed; integer or \code{NULL}
#'
#' @examples
#' rgene()
#' rgene(5, alpha = 'ABCD', nalpha = 1, nnum = 5:6)
#' rgene(5, alpha = c('A','T','C','G'), num = '', sep = '')
#'
#' @export
rgene <- function(n = 1L, alpha = LETTERS[1:5], nalpha = 2:5,
                  num = 0:9, nnum = 1:5, sep = '-', seed = NULL) {
  ## collapse any vector into a single string
  collapse0 <- function(...) paste0(..., collapse = '')
  ## random-length draws (with replacement) from the letter and digit pools
  draw_alpha <- function() sample(alpha, sample(nalpha, 1), TRUE)
  draw_num <- function() sample(num, sample(nnum, 1), TRUE)
  set.seed(seed)
  ## one name per replicate: letters, separator, digits -- argument order
  ## preserves the original sequence of RNG draws
  replicate(n, collapse0(collapse0(draw_alpha()), sep, collapse0(draw_num())))
}
#' Install packages temporarily
#'
#' This function will create a temporary \code{.libPath}, install, and load
#' packages for use in a single \code{R} session. \cr \cr To install a repo
#' from github temporarily, use \code{\link[withr]{with_libpaths}}.
#'
#' @param pkgs character vector of the names of packages whose current
#' versions should be downloaded from the repositories
#' @param lib character vector giving the library directories where to install
#' \code{pkgs}; recycled as needed; if missing (default), a
#' \code{\link{tempdir}} will be created
#' @param ... additional arguments passed to
#' \code{\link[utils]{install.packages}}
#'
#' @examples
#' \dontrun{
#' install_temp(c('devtools', 'testthat'))
#' }
#'
#' @export
install_temp <- function(pkgs, lib, ...) {
  ## default library lives only for this session
  if (missing(lib))
    lib <- tempdir()
  ## resetting libPaths before restarting r session may not be desired
  # lp <- .libPaths()
  # on.exit(.libPaths(lp))
  ## prepend the temporary library to the search path for this session
  .libPaths(lib)
  utils::install.packages(pkgs = pkgs, lib = lib, ...)
  ## attach each freshly-installed package
  for (ii in pkgs)
    require(ii, character.only = TRUE)
  invisible(NULL)
}
#' Merge nested lists
#'
#' Recursive functions to merge nested lists.
#'
#' \code{nestedmerge} recursively calls itself to merge similarly-structured
#' named \emph{or} unnamed lists. Unnamed lists results in a "horizontal"
#' merge; named lists will be matched based on names. In either case, the
#' matching element (or list(s) of elements(s)) should also have the same
#' structure.
#'
#' \code{nestedMerge} is a convenience wrapper for \code{nestedmerge} in cases
#' where list \code{a} contains elements not in list \code{b}. If using
#' \code{nestedmerge} in this case, only elements of list \code{a} will be
#' merged and returned.
#'
#' @param x,y lists
#'
#' @seealso
#' \code{\link{clist}}; adapted from
#' \url{http://stackoverflow.com/questions/23483421/combine-merge-lists-by-elements-names-list-in-list}
#'
#' @examples
#' ## l1 and l2 have similar structures
#' l1 <- list(a = list(1:2, NULL), b = list(1:3, NULL), c = list(1:5))
#' l2 <- list(a = list(NULL, 0:1), b = list(NULL, 4:6))
#' l3 <- list(a = list(NULL, 0:1), b = list(4:6))
#'
#' nestedMerge(l1, l2)
#'
#' ## "fails" for `b` since `l1$b` and `l3$b` are not structured similarly
#' nestedMerge(l1, l3)
#'
#' l1 <- list(integers = 1:3, letters = letters[1:3],
#' words = c('two','strings'), rand = rnorm(5))
#' l2 <- list(letters = letters[24:26], booleans = c(TRUE, TRUE, FALSE),
#' words = 'another', floating = c(1.2, 2.4),
#' integers = 1:3 * 10)
#'
#' nestedMerge(l1, l2)
#'
#' ## compare to
#' nestedmerge(l1, l2)
#'
#' @export
nestedMerge <- function(x, y) {
  ## nothing to merge against
  if (missing(y))
    return(x)
  ## pad x with NULL placeholders for names that occur only in y, so that
  ## nestedmerge() visits (and keeps) y-only elements as well
  if (islist(x) & islist(y)) {
    only_y <- setdiff(names(y), names(x))
    x <- c(x, setNames(vector('list', length(only_y)), only_y))
  }
  nestedmerge(x, y)
}
#' @rdname nestedMerge
#' @export
nestedmerge <- function(x, y) {
  ## recursion base: nothing to merge against
  if (missing(y))
    return(x)
  if (islist(x) & islist(y)) {
    res <- list()
    if (!is.null(names(x))) {
      ## named lists: merge element-by-name; x's element is kept as-is
      ## when y has no (non-NULL) counterpart
      for (nn in names(x)) {
        res <- if (nn %in% names(y) && !is.null(y[[nn]]))
          append(res, c(Recall(x[[nn]], y[[nn]]))) else
          append(res, list(x[[nn]]))
        names(res)[length(res)] <- nn
      }
    } else {
      ## unnamed lists: merge positionally ("horizontal" merge)
      for (ii in seq_along(x))
        res <- if (ii <= length(y) && !is.null(y[[ii]]))
          append(res, Recall(x[[ii]], y[[ii]])) else
          append(res, list(x[[ii]]))
    }
    res
    ## leaves: concatenate the two values into a single list element
  } else list(c(x, y))
}
#' Extract parts of file path
#'
#' These functions will extract the directory, file name, and file extension
#' of some common types of files. Additionally, \code{path_extract} will
#' check its results by recreating \code{path} and will give warnings if
#' the results fail to match the input.
#'
#' \code{fname} and \code{path_extract} do the text processing;
#' \code{file_name} and \code{file_ext} are convenience functions that only
#' return the file name or file extension, respectively.
#'
#' @note
#' Known examples where this function fails:
#' \itemize{
#' \item{\code{.tar.gz} }{files with compound file extensions}
#' }
#'
#' @param path file path as character string
#'
#' @seealso \code{\link[rawr]{regcaptures}}; \code{\link{basename}};
#' \code{\link{dirname}}
#'
#' @examples
#' l <- list(
#' '~/desktop/tmp.csv', ## normal file with directory
#' '.dotfile.txt', ## dotfile with extension
#' '.vimrc', ## dotfile with no extension
#' '~/file.', ## file name ending in .
#' '~/DESCRIPTION', ## no extension
#' '~/desktop/tmp/a.filename.tar.gz' ## compound extension fails
#' )
#'
#' setNames(lapply(l, path_extract), l)
#' setNames(lapply(l, fname), l)
#' setNames(lapply(l, file_name), l)
#' setNames(lapply(l, file_ext), l)
#'
#' @export
path_extract <- function(path) {
  ## Split a file path into dirname / basename / filename / extension and
  ## sanity-check the pieces by reassembling the path.
  ## path: a file path (NOTE(review): a length > 1 `path` makes the if()
  ##   condition below non-scalar, which errors in R >= 4.2 -- confirm that
  ##   callers only pass a single path)
  ## returns: a character matrix with columns dirname, basename, filename,
  ##   extension
  p <- normalizePath(path, mustWork = FALSE)
  m <- cbind(dirname = dirname(p), basename = basename(p), fname(p))
  ## rebuild the path from its parts; the '.' separator is only inserted
  ## when an extension was actually found
  mm <- file.path(
    m[, 'dirname'],
    paste(m[, 'filename'], m[, 'extension'],
          sep = ifelse(nzchar(m[, 'extension']), '.', ''))
  )
  ## warn when the reassembled path does not round-trip (e.g. compound
  ## extensions such as .tar.gz -- see the @note above)
  if (gsub('\\./', '', mm) != p || !nzchar(m[, 'filename']))
    warning('Results could not be validated', domain = NA)
  m
}
#' @rdname path_extract
#' @export
fname <- function(path) {
  ## Split the basename of `path` into (filename, extension) using a single
  ## regex with two capture groups:
  ##   group 1: a leading dotfile (e.g. ".vimrc") OR the lazily-matched
  ##            file-name part
  ##   group 2: the text after the final '.', i.e. the extension (if any)
  xx <- basename(path)
  pp <- '(^\\.[^ .]+$|[^:\\/]*?[.$]?)(?:\\.([^ :\\/.]*))?$'
  ## regcaptures2() is a package helper returning captured groups as a
  ## matrix (one row per input); only the first list element is used here
  `colnames<-`(regcaptures2(xx, pp)[[1L]], c('filename', 'extension'))
}
#' @rdname path_extract
#' @export
file_name <- function(path) {
  ## Convenience wrapper: the 'filename' column of path_extract().
  parts <- path_extract(path)
  parts[, 'filename']
}
#' @rdname path_extract
#' @export
file_ext <- function(path) {
  ## Convenience wrapper: the 'extension' column of path_extract().
  parts <- path_extract(path)
  parts[, 'extension']
}
#' @rdname path_extract
#' @export
rm_ext <- function(path) {
  ## Strip a trailing file extension (same pattern as fname()); dotfiles
  ## such as '.vimrc' are treated as extension-less and left untouched.
  fname_pattern <- '(^\\.[^ .]+$|[^:\\/]*?[.$]?)(?:\\.([^ :\\/.]*))?$'
  gsub(fname_pattern, '\\1', path, perl = TRUE)
}
#' Multiple pattern matching and replacement
#'
#' Perform multiple pattern matching and replacement.
#'
#' @param pattern for substituting, a vector of length two for a single
#' replacement or a \emph{list} of length two vectors for multiple
#' replacements where each vector is \code{c(pattern,replacement)}; or for
#' grepping, a vector of character strings containing regular expressions
#' to be matched in \code{x}
#' @param x a character vector where matches are sought
#' @param ... additional parameters passed onto other methods
#' @param parallel logical; if \code{TRUE}, grepping will be performed in
#' \pkg{\link{parallel}}; also, if \code{pattern} is a vector greater than
#' \code{1e4} elements in length, \code{parallel} defaults to \code{TRUE}
#' @param replacement optional; if given, both \code{pattern} and
#' \code{replacement} should be character vectors of equal length
#' (\code{replacement} will be recycled if needed)
#'
#' @seealso
#' \code{\link[base]{grep}}; \code{\link{vgrep}}
#'
#' @examples
#' ## grepping
#' mgrep(letters[1:5], letters[1:5])
#' mgrepl(letters[1:5], letters[1:5])
#'
#' ## subbing
#' s1 <- 'thiS iS SooD'
#'
#' ## if replacement is given, acts like gsub
#' mgsub(c('hi', 'oo'), c('HI', '00'), s1)
#' mgsub(c('\\bS','$','i'), '_', rep(s1, 3))
#'
#' ## pattern can also be a list of c(pattern, replacement)
#' r1 <- c('hi','HI')
#' r2 <- c(list(r1), list(c('oo', '00')))
#' r3 <- c(r2, list(c('i', '1'), c('\\b(\\w)', '\\U\\1')))
#'
#' mgsub(r1, x = s1, ignore.case = TRUE)
#' mgsub(r2, x = s1)
#' mgsub(r3, x = s1, perl = TRUE)
#'
#' @name mgrep
NULL
mgrep_ <- function(parallel, FUN, vlist, ...) {
  ## Shared engine for mgrep()/mgrepl(): apply FUN (a grep-like function
  ## taking `pattern` and `x`) to `x` once per element of `pattern`,
  ## optionally on a parallel cluster.
  ##
  ## parallel: logical; run one cluster worker per available core?
  ## FUN:      e.g. base::grep or base::grepl
  ## vlist:    list(pattern = <patterns>, x = <vector to search>)
  ## ...:      forwarded to FUN
  ## returns:  a list with one FUN result per pattern
  pattern <- vlist$pattern
  x <- vlist$x
  if (parallel) {
    ## requireNamespace() loads but does NOT attach the package, so every
    ## function from 'parallel' must be called with an explicit
    ## parallel:: prefix (the original called makeCluster() etc. unqualified,
    ## which fails unless the user attached 'parallel' themselves)
    if (!requireNamespace('parallel', quietly = TRUE))
      stop("the 'parallel' package is required when parallel = TRUE",
           call. = FALSE)
    cl <- parallel::makeCluster(getOption('cl.cores', parallel::detectCores()))
    ## guarantee worker shutdown even if FUN errors
    on.exit(parallel::stopCluster(cl), add = TRUE)
    parallel::clusterExport(cl, varlist = c('x', 'pattern'),
                            envir = environment())
    parallel::parLapply(cl, seq_along(pattern),
                        function(ii) FUN(pattern = pattern[ii], x = x, ...))
  } else {
    ## serial fallback
    lapply(seq_along(pattern), function(ii)
      FUN(pattern = pattern[ii], x = x, ...))
  }
}
#' @rdname mgrep
#' @export
mgrepl <- function(pattern, x, ..., parallel = length(pattern) > 1e4) {
  ## Vectorized grepl(): one logical vector per element of `pattern`.
  vl <- list(pattern = pattern, x = x)
  mgrep_(parallel = parallel, FUN = base::grepl, ..., vlist = vl)
}
#' @rdname mgrep
#' @export
mgrep <- function(pattern, x, ..., parallel = length(pattern) > 1e4) {
  ## Vectorized grep(): one index vector per element of `pattern`.
  vl <- list(pattern = pattern, x = x)
  mgrep_(parallel = parallel, FUN = base::grep, ..., vlist = vl)
}
msub_ <- function(pattern, replacement, x, ..., FUN) {
  ## Shared engine for msub()/mgsub(): apply FUN ('sub' or 'gsub') once per
  ## pattern/replacement pair, folding over `x` from the LAST pair to the
  ## first (Reduce(..., right = TRUE)).
  extra <- match.call(expand.dots = FALSE)$...
  FUN <- match.fun(FUN)
  if (!missing(replacement)) {
    ## normalize the vector form into a list of c(pattern, replacement)
    ## pairs, recycling `replacement` to the length of `pattern`
    repl <- rep_len(replacement, length(pattern))
    pattern <- lapply(seq_along(pattern),
                      function(ii) c(pattern[ii], repl[ii]))
  }
  if (!is.list(pattern))
    pattern <- list(pattern)
  apply_one <- function(pair, txt)
    do.call(FUN, c(list(x = txt, pattern = pair[1L], replacement = pair[2L]),
                   extra))
  Reduce(apply_one, pattern, x, right = TRUE)
}
#' @rdname mgrep
#' @export
msub <- function(pattern, replacement, x, ...) {
  ## Multiple sub(): replace the first match of each pattern in turn.
  msub_(pattern, replacement, x, FUN = 'sub', ...)
}
#' @rdname mgrep
#' @export
mgsub <- function(pattern, replacement, x, ...) {
  ## Multiple gsub(): replace all matches of each pattern in turn.
  msub_(pattern, replacement, x, FUN = 'gsub', ...)
}
#' Flatten lists
#'
#' Flattens lists and nested lists of vectors, matrices, and/or data frames.
#'
#' @param l a list
#'
#' @references
#' \url{https://stackoverflow.com/q/8139677/2994949}
#'
#' @examples
#' l <- list(matrix(1:3), list(1:3, 'foo'), TRUE, 'hi',
#' list(mtcars[1:5, 1:5], list(mtcars[1:5, 1:5])))
#' str(l)
#' str(flatten(l))
#'
#' @export
flatten <- function(l) {
  ## Flatten nested lists one level at a time until no element is itself a
  ## list (as judged by the package helper islist(), which keeps vectors,
  ## matrices and data frames as leaves).
  has_sublist <- function(x) any(vapply(x, islist, logical(1L)))
  while (has_sublist(l)) {
    ## wrap non-list leaves so unlist() only peels a single list level
    wrapped <- lapply(l, function(el) if (islist(el)) el else list(el))
    l <- unlist(wrapped, recursive = FALSE)
  }
  l
}
#' tree
#'
#' List contents of directories in a tree-like format.
#'
#' @param path file name path as character string
#' @param full.names logical; if \code{TRUE}, the full file path will be
#' returned; otherwise, only the \code{\link{basename}} is returned (default)
#' @param ndirs,nfiles maximum number of directories and files per directory
#' to print
#'
#' @references
#' \url{https://stackoverflow.com/q/14188197/2994949}
#'
#' @examples
#' str(tree(system.file(package = 'rawr'), FALSE))
#'
#' @export
tree <- function(path = '.', full.names = FALSE, ndirs = 5L, nfiles = 5L) {
  ## Build a nested list mirroring the directory structure under `path`:
  ## directories become named sub-lists, files become character leaves.
  ## `nfiles` caps the number of non-directory entries kept per directory;
  ## `ndirs` caps the number of top-level entries returned.
  ## helper
  tree_ <- function(path = '.', full.names, n) {
    isdir <- file.info(path)$isdir
    n <- as.integer(n)
    res <- if (!isdir) {
      ## leaf: a single file
      if (full.names)
        path else basename(path)
    } else {
      files <- list.files(path, full.names = TRUE, include.dirs = TRUE)
      isdir <- file.info(files)$isdir
      ## keep all directories but only the first n files -- cumsum counts
      ## non-directories seen so far, in list.files() order
      files <- files[isdir | cumsum(!isdir) <= n]
      res <- lapply(files, tree_, full.names, n)
      names(res) <- basename(files)
      res
    }
    res
  }
  path <- normalizePath(path, mustWork = TRUE)
  head(tree_(path, full.names, nfiles), ndirs)
}
#' Recursive \code{rm} for lists
#'
#' Remove \code{NULL} or \code{list(NULL)} objects recursively from a list.
#'
#' @param l a list
#' @param rm_list logical; if \code{FALSE}, lists with only the \code{NULL}
#' object will not be removed
#'
#' @references
#' \url{https://stackoverflow.com/q/26539441/2994949}
#'
#' @examples
#' str(l <- list(list(NULL),list(1),list('a', NULL)))
#' str(rm_null(l))
#' str(rm_null(l, FALSE))
#'
#' @export
rm_null <- function(l, rm_list = TRUE) {
  ## Recursively drop NULL elements from a list; when rm_list is TRUE,
  ## lists containing only NULLs (e.g. list(NULL)) are dropped as well.
  drop_this <- function(el) {
    if (is.null(el))
      return(TRUE)
    rm_list && all(vapply(el, is.null, logical(1L)))
  }
  kept <- Filter(function(el) !drop_this(el), l)
  lapply(kept, function(el) {
    if (is.list(el))
      rm_null(el, rm_list)
    else el
  })
}
#' Cumulative functions
#'
#' @description
#' \code{cum_reset} will reset a cumulative function, \code{FUN}, when
#' \code{value} is encountered.
#'
#' \code{*_na} functions offer alternatives to the \pkg{base}
#' \link[=cumsum]{cumulative functions} that can handle \code{NA}s.
#'
#' \code{cum_mid} finds the mid-points between "stacked" numeric values.
#'
#' @param x a vector (or numeric matrix for \code{cum_mid})
#' @param value a value of \code{x} which signals the end of a group and
#' resets \code{FUN}
#' @param FUN function to apply to each group, usually one of
#' \code{\link{cumsum}}, \code{\link{cumprod}}, \code{\link{cummax}}, or
#' \code{\link{cummin}} but can be any function that returns a vector the
#' same length and type as the input (\emph{a la} \code{\link{ave}})
#' @param useNA logical; if \code{TRUE}, indices with \code{NA} will be
#' unchanged; if \code{FALSE}, the previous value is carried forward
#' @param adj for \code{cum_mid}, an adjustment parameter, usually in
#' \code{[0, 1]}, giving the relative position between each value (default
#' is centered, \code{adj = 0.5})
#'
#' @return
#' A vector having the same length as \code{x} with \code{FUN} applied to
#' each group defined by positions of \code{value}.
#'
#' @seealso
#' \code{\link{cumsum}}; \code{\link{ave}}; \code{locf}
#'
#' @examples
#' x <- 1:10
#' cum_reset(x, 5, cummin)
#' cum_reset(x, c(5, 8), cummin)
#'
#' x[x %% 4 == 0] <- 0
#' cum_reset(x, FUN = cumsum)
#' cum_reset(x, FUN = sum)
#'
#' set.seed(1)
#' data.frame(x = x <- rpois(15, 1),
#' y = cum_reset(x, FUN = cumsum),
#' z = cum_reset(x, 0, function(x) ave(x, FUN = sum)))
#'
#'
#' ## x need not be numeric if FUN returns an appropriate type and length
#' cum_reset(letters[1:10], c('d','g'), function(x)
#' letters[as.numeric(factor(x))])
#'
#'
#' ## cum* functions to handle NA values
#' x <- 1:10
#' x[x %% 4 == 0] <- 0
#' na <- ifelse(x == 0, NA, x)
#'
#' cumsum(x)
#' cum_na(x, cumsum)
#'
#' cumsum(na)
#' cum_na(na, cumsum)
#'
#' ## shorthand
#' cumsum_na(na)
#' cumsum_na(na)
#'
#'
#' ## like cum_reset, cum_na's FUN argument can be generalized if FUN
#' ## returns the correct class and length of the input
#' FUN <- function(x) vector(class(x), length(x))
#' cum_na(na, FUN)
#'
#' cumdiff <- function(x) Reduce(`-`, x, accumulate = TRUE)
#' cumdiff(x)
#' cumsum(c(x[1L], -x[-1L]))
#'
#' cumdiff(na)
#' cumsum(c(na[1L], -na[-1L]))
#' cum_na(na, cumdiff)
#'
#'
#' ## "stacked" numeric values, eg, from a barplot
#' set.seed(1)
#' x <- matrix(runif(12), ncol = 3L)
#' bp <- barplot(x, names.arg = paste('adj = ', c(0, 1, 0.5)))
#'
#' for (ii in seq.int(ncol(x))) {
#' xii <- x[, ii, drop = FALSE]
#' text(bp[ii], cum_mid(xii, c(0, 1, 0.5)[ii]), xii, xpd = NA)
#' }
#'
#' @name cumfuns
NULL
#' @rdname cumfuns
#' @export
cum_reset <- function(x, value = 0L, FUN) {
  ## Apply a cumulative-style function within groups, starting a new group
  ## on the element immediately AFTER each occurrence of `value`.
  FUN <- match.fun(FUN)
  ## group id = number of reset values seen strictly before each position
  grp <- cumsum(x %in% value)
  grp <- c(0L, grp[-length(grp)])
  unname(unlist(lapply(split(x, grp), FUN)))
}
#' @rdname cumfuns
#' @export
cum_na <- function(x, FUN, useNA = TRUE) {
  ## NA-tolerant cumulative function: apply FUN to the non-NA values only,
  ## keeping NA positions (useNA = TRUE) or carrying the last value forward
  ## via the package helper locf() (useNA = FALSE).
  FUN <- match.fun(FUN)
  ok <- !is.na(x)
  x[ok] <- FUN(x[ok])
  if (useNA)
    return(x)
  locf(x)
}
#' @rdname cumfuns
#' @export
cumsum_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumsum()
  cum_na(x, FUN = cumsum, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cumprod_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cumprod()
  cum_na(x, FUN = cumprod, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cummax_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cummax()
  cum_na(x, FUN = cummax, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cummin_na <- function(x, useNA = TRUE) {
  ## NA-tolerant cummin()
  cum_na(x, FUN = cummin, useNA = useNA)
}
#' @rdname cumfuns
#' @export
cum_mid <- function(x, adj = 0.5) {
  ## Positions within "stacked" values: per column, the cumulative sum of
  ## the previous rows plus `adj` of the current value (adj = 0.5 gives the
  ## center of each stacked segment, as in a stacked barplot).
  stopifnot(adj %inside% 0:1)  # %inside% is a package range-check operator
  mat <- as.matrix(x)
  ## previous cumulative totals: shift each column down one row, pad with 0
  res <- rbind(0, mat[-nrow(mat), , drop = FALSE])
  ## mat / (1 / adj) is mat * adj written as division (adj = 0 yields 0)
  res <- mat / (1 / adj) + apply(res, 2L, cumsum)
  ## drop the dim attribute again when the input was a plain vector
  if (is.null(dim(x)))
    drop(res) else res
}
#' \code{grep} for vectors
#'
#' \code{grep} vectors for patterns given by other vectors.
#'
#' @param pattern a vector to be matched
#' @param x vector having the same type as \code{pattern} where matches are
#' sought
#'
#' @return
#' For \code{vgrep}, a vector of indices indicating the start of the matches
#' found in \code{x}. For \code{vgrepl}, a list of logical vectors of
#' \code{length(x)} for each match found in \code{x}.
#'
#' @references
#' Adapted from \url{https://stackoverflow.com/q/33027611/2994949}
#'
#' @seealso
#' \code{\link{grep}}; \code{\link[rawr]{mgrep}}; \code{\link[rawr]{\%==\%}}
#'
#' @examples
#' x <- c(0,1,1,0,1,1,NA,1,1,0,1,1,NA,1,0,0,1,
#' 0,1,1,1,NA,1,0,1,NA,1,NA,1,0,1,0,NA,1)
#' vgrep(c(1, NA, 1), x)
#' vgrepl(c(1, NA, 1), x)
#'
#' vgrep(c(1, 0, 1, NA), x)
#' which(vgrepl(c(1, 0, 1, NA), x)[[1]])
#'
#' @export
vgrep <- function(pattern, x) {
  ## Find the start index of every occurrence of the sub-vector `pattern`
  ## inside `x`, like grep() but on vectors. Uses the package's NA-aware
  ## equality operator %==% so NA matches NA.
  vgrep_ <- function(pp, xx, acc = if (length(pp))
    seq_along(xx) else integer(0L)) {
    ## `acc` holds candidate positions, advanced by one for each matched
    ## pattern element; recursion stops when the pattern is exhausted
    if (!length(pp))
      return(acc)
    Recall(pp[-1L], xx, acc[which(pp[[1L]] %==% xx[acc])] + 1L)
  }
  ## acc ends up length(pattern) past each match start, so shift back
  vgrep_(pattern, x) - length(pattern)
}
#' @rdname vgrep
#' @export
vgrepl <- function(pattern, x) {
  ## Logical-mask version of vgrep(): one logical vector per match, TRUE
  ## over the span of the matched sub-vector.
  m <- vgrep(pattern, x)
  lp <- length(pattern)
  pp <- rep(FALSE, length(x))
  ## NOTE(review): the no-match branch returns integer(0), not list(), which
  ## disagrees with the documented return value ("a list of logical vectors")
  if (!length(m))
    integer(0L) else lapply(m, function(y) {
      pp[y:(y + lp - 1L)] <- TRUE
      pp
    })
}
#' Justify text
#'
#' Add whitespace to (monospaced) text for justified or block-style spacing.
#'
#' @param string a character string
#' @param width desired width text in characters given as a positive integer
#' @param fill method of adding whitespace, i.e., by starting with the
#' \code{"right"}- or \code{"left"}-most whitespace or \code{"random"}
#'
#' @seealso
#' \code{\link{strwrap}}
#'
#' @references
#' Adapted from \url{https://stackoverflow.com/q/34710597/2994949}
#'
#' @examples
#' x <- paste(rownames(mtcars), collapse = ' ')
#' cat(justify(x))
#'
#' ## slight differences in whitespace for fill methods
#' op <- par(xpd = NA, family = 'mono', cex = 0.8)
#' plot(0, ann = FALSE, axes = FALSE, type = 'n')
#' text(1, 1, justify(x, fill = 'random'))
#' text(1, 0, justify(x, fill = 'right'), col = 2)
#' text(1, -1, justify(x, fill = 'left'), col = 3)
#' par(op)
#'
#' @export
justify <- function(string, width = getOption('width') - 10L,
                    fill = c('random', 'right', 'left')) {
  ## Block-justify monospaced text: wrap to `width`, then pad the interior
  ## spaces of every line (except the last) via fill_spaces_().
  fill <- match.arg(fill)
  ## doubling newlines preserves paragraph breaks through strwrap()
  doubled <- gsub('\n', '\n\n', string, fixed = TRUE)
  wrapped <- strwrap(doubled, width = width)
  paste(fill_spaces_(wrapped, width, fill), collapse = '\n')
}
fill_spaces_ <- function(lines, width, fill) {
  ## Internal: pad every line except the last with extra spaces between
  ## words so each line is exactly `width` characters wide (block layout).
  tokens <- strsplit(lines, '\\s+')
  res <- lapply(head(tokens, -1L), function(x) {
    nspace <- max(length(x) - 1L, 1L)        # number of gaps between words
    extra <- width - sum(nchar(x)) - nspace  # spaces still to distribute
    reps <- extra %/% nspace                 # even share per gap
    extra <- extra %% nspace                 # leftover spaces
    times <- rep.int(if (reps > 0L) reps + 1L else 1L, nspace)
    if (extra > 0L) {
      ## distribute the leftover spaces one per gap, from the chosen side
      if (fill == 'right')
        times[seq.int(extra)] <- times[seq.int(extra)] + 1L
      else if (fill == 'left')
        times[(nspace - extra + 1L):nspace] <-
          times[(nspace - extra + 1L):nspace] + 1L
      ## NOTE(review): `inds` is assigned inside the RHS subscript and read
      ## on the LHS -- this relies on RHS-before-LHS evaluation order
      else times[inds] <- times[(inds <- sample(nspace, extra))] + 1L
    }
    ## interleave the computed space runs with the words
    spaces <- c('', unlist(lapply(times, formatC, x = ' ', digits = NULL)))
    res <- paste(c(rbind(spaces, x)), collapse = '')
    ## very short lines are collapsed back to single spacing
    if (sum(c(nchar(x), length(x), extra)) < width / 2)
      gsub('\\s{1,}', ' ', res) else res
  })
  ## the final line is left ragged (normal single spaces)
  c(res, paste(tail(tokens, 1L)[[1L]], collapse = ' '))
}
#' Find factors
#'
#' Find common factors of two or more integers.
#'
#' @param ... integers
#'
#' @examples
#' factors(21)
#' factors(3 * 2 ^ 20)
#' factors(64, 128, 58)
#'
#' @export
factors <- function(...) {
  ## Common (positive) divisors of one or more integers: compute each
  ## number's divisors by trial division, then intersect the sets.
  divisors <- function(n) {
    n <- as.integer(n)
    candidates <- seq_len(abs(n))
    candidates[n %% candidates == 0L]
  }
  Reduce(intersect, lapply(list(...), divisors))
}
#' Sample each
#'
#' Returns a logical vector where \code{n} items are randomly sampled from
#' each unique value of a vector, \code{x}.
#'
#' @param x a character, factor, or numeric vector
#' @param n number to sample from each unique group in order; if \code{x} is
#' a factor, \code{n} should correspond to \code{levels(x)}; otherwise,
#' \code{n} will be matched with the sorted unique groups
#'
#' @return
#' A logical vector the same length as \code{x} identifying selected indices.
#'
#' @seealso
#' \code{\link{sample}}; \code{\link{kinda_sort}}
#'
#' @examples
#' x <- mtcars$gear
#'
#' sample_each(x)
#' mtcars[sample_each(x), ]
#'
#' ## compare numeric vs factor vectors (see description above)
#' mtcars[sample_each(x, 3:5), ]
#' X <- factor(x, 5:3)
#' mtcars[sample_each(X, 3:5), ]
#'
#' @export
sample_each <- function(x, n = 1L) {
  ## Logical selector that randomly picks `n` elements from each unique
  ## value of `x`; `n` is recycled across the sorted unique values (or the
  ## factor levels when `x` is a factor -- see @param above).
  x <- setNames(x, x)
  lx <- table(x)                                     # group sizes, by level
  nT <- setNames(rep_len(n, length(lx)), names(lx))  # picks per group
  nF <- lx - nT                                      # leftovers per group
  x <- as.character(x)
  ## within each group, shuffle nF zeros and nT ones into the positions
  idx <- ave(x, x, FUN = function(xx)
    sample(rep(0:1, c(nF[xx[1L]], nT[xx[1L]]))))
  !!as.numeric(idx)  # character "0"/"1" -> numeric -> logical
}
#' Pick elements from columns
#'
#' This function will return \code{\link{colnames}} or column values (if
#' \code{value = TRUE}) for "indicator-like" matrices or data frames.
#'
#' @param data a data frame or matrix
#' @param ind if \code{value = FALSE} (default), a vector (usually a single
#' value) to match and return column name(s) of \code{data} where \code{ind}
#' is found; if \code{value = TRUE}, a vector of values to be \emph{ignored}
#' @param value logical; if \code{TRUE}, returns column value(s); otherwise,
#' returns column name(s) (default)
#'
#' @return
#' If \code{value} is \code{FALSE} (default), the column names of \code{data}
#' for which each row of \code{data} contained \code{ind}.
#'
#' If \code{value} is \code{TRUE}, the column values of \code{data} which are
#' \emph{not} values of \code{ind}.
#'
#' @examples
#' set.seed(1)
#' ss <- sample(10)
#' dd <- as.matrix(ftable(1:10, ss))
#'
#' all(pickcol(dd) == ss)
#'
#' rn <- rnorm(10)
#' dd[dd == 1] <- rn
#' all(pickcol(dd, value = TRUE, ind = 0) == rowSums(dd))
#'
#'
#' dd <- data.frame(
#' x = c(1, 0, 0),
#' y = c(0, 0, 1),
#' z = c(0, 1, 0),
#' a = c('one', '', ''),
#' b = c('', '', 'three'),
#' c = c('', 'two', '')
#' )
#'
#' pickcol(dd[1:2])
#' pickcol(dd[1:2], 0)
#' pickcol(dd[1:3] + 1, ind = 2)
#'
#' pickcol(dd[4:6], value = TRUE, ind = '')
#' pickcol(dd, value = TRUE, ind = c('', 0:1))
#'
#' dd[dd == ''] <- NA
#' pickcol(dd[4:6], value = TRUE)
#'
#' @export
pickcol <- function(data, ind = 1L, value = FALSE) {
  ## For each row of an indicator-like table, return either the names of
  ## the columns whose value matches `ind` (value = FALSE) or the row
  ## values that are NOT in `ind` (value = TRUE); multiple hits are joined
  ## with toString().
  pick_row <- function(row) {
    if (value) {
      ## drop ignored values (and pre-existing NAs), keep the rest
      row[row %in% ind] <- NA
      row <- row[!is.na(row)]
      if (length(row) > 1L)
        toString(row)
      else row
    } else {
      hit <- row %in% ind
      if (any(hit))
        toString(names(row)[hit])
      else NA
    }
  }
  unname(apply(data, 1L, pick_row))
}
#' Number of unique values
#'
#' @param x a vector
#' @param na.rm logical; if \code{TRUE}, \code{NA} will not be counted as a
#' unique level; default is to include
#'
#' @examples
#' x <- c(1:5, NA)
#' lunique(factor(x))
#' lunique(x, TRUE)
#'
#' @export
lunique <- function(x, na.rm = FALSE) {
  ## Number of unique values in `x`.
  ## na.rm: if TRUE, NA does not count as a unique level. NAs are removed
  ##   explicitly here rather than relying on sort()'s side effect of
  ##   silently discarding them (clearer, and avoids an O(n log n) sort).
  if (na.rm)
    x <- x[!is.na(x)]
  length(unique(x))
}
#' Remove non ASCII characters
#'
#' @param x a character vector
#'
#' @export
rm_nonascii <- function(x) {
  ## Keep only printable ASCII, i.e. the range from space (0x20) through
  ## tilde (0x7E); everything else (accents, control chars, tabs) is removed.
  gsub('[^ -~]', '', x)
}
#' \code{datatable}s with sparklines
#'
#' Create an HTML table widget using the JavaScript library DataTables
#' (\code{\link[DT]{datatable}}) with \code{\link[sparkline]{sparkline}}
#' columns.
#'
#' @param data a data frame or matrix
#' @param spark a \emph{named} list of lists for each column of \code{data}
#' for which an interactive sparkline will replace each row cell
#'
#' each named list of \code{spark} should have length \code{nrow(data)} and
#' contain at least one numeric value
#' @param type the type of sparkline, one or more of "line", "bar", "box1",
#' or "box2", recycled as needed; the only difference between "box1" and
#' "box2" is the use of \code{spark_range}
#' @param spark_range an optional list or vector (recycled as needed) giving
#' the overall range for each list of \code{spark}; if missing, the ranges
#' will be calculated; note this is only applicable for \code{type = "line"}
#' or \code{type = "box1"}
#' @param options,... \code{options} or additional arguments passed to
#' \code{\link[DT]{datatable}}
#'
#' @seealso
#' Adapted from \url{leonawicz.github.io/HtmlWidgetExamples/ex_dt_sparkline.html}
#'
#' @examples
#' \dontrun{
#' library('DT')
#'
#' ## strings of data separated by commas should be passed to each row
#' ## this data will be used to generate the sparkline
#' dd <- aggregate(cbind(wt, mpg) ~ gear, mtcars, function(x)
#' toString(fivenum(x)))
#' sparkDT(dd)
#'
#'
#' ## for each column, create a list of vectors for each row to be plotted
#' l <- sapply(c('wt', 'mpg'), function(x)
#' split(mtcars[, x], mtcars$gear), simplify = FALSE, USE.NAMES = TRUE)
#'
#' sparkDT(dd, l, type = 'box1')
#'
#'
#' set.seed(1)
#' spark <- replicate(nrow(mtcars), round(rnorm(sample(20:100, 1)), 2), FALSE)
#'
#' sparkDT(mtcars, list(mpg = spark, wt = spark, disp = spark, qsec = spark))
#'
#' sparkDT(mtcars, list(mpg = spark, wt = spark, disp = spark, qsec = spark),
#' spark_range = list(disp = c(-5, 5), mpg = c(0, 10)))
#'
#' ## note difference between box1 (boxes aligned) and box2 (max size)
#' sparkDT(mtcars[, c('mpg', 'wt')],
#' list(mpg = spark, wt = spark),
#' type = c('box1', 'box2'),
#' # range = c(-2, 2),
#' rownames = FALSE,
#' colnames = c('box1', 'box2')
#' )
#' }
#'
#' @export
sparkDT <- function(data, spark, type = c('line', 'bar', 'box1', 'box2'),
                    spark_range, options = list(), ...) {
  ## Build a DT::datatable() in which the columns named in `spark` are
  ## rendered as inline sparkline charts (one chart per row).
  data <- as.data.frame(data)
  ## no spark columns requested -- plain datatable
  if (missing(spark))
    return(DT::datatable(data = data, ..., options = options))
  ## default chart range per column: observed range of that column's data
  srange <- lapply(spark, function(x) range(unlist(x), na.rm = TRUE))
  ## normalize spark_range into a named list parallel to `spark`:
  ##   missing      -> the computed ranges
  ##   plain vector -> recycled to every spark column
  ##   named list   -> overrides merged onto the computed ranges
  ##   unnamed list -> names taken from `spark`
  spark_range <- if (missing(spark_range))
    srange
  else if (!is.list(spark_range))
    setNames(list(spark_range)[rep_len(1L, length(spark))], names(spark))
  else if (length(names(spark_range)))
    modifyList(srange, spark_range)
  else setNames(spark_range, names(spark))
  spark_range <- spark_range[names(spark)]
  type <- match.arg(type, several.ok = TRUE)
  type <- rep_len(type, length(spark))
  ## every spark column must exist in the data
  stopifnot(
    all(names(spark) %in% names(data))
  )
  ## each cell becomes a comma-separated string that the sparkline JS parses
  spark <- rapply(spark, paste, collapse = ', ', how = 'list')
  data[, names(spark)] <- lapply(spark, unlist)
  render_sparkDT(data, names(spark), type, spark_range, options, ...)
}
render_sparkDT <- function(data, variables, type, range, options, ...) {
  ## Internal: wire the sparkline JavaScript (per-column render functions
  ## plus a draw callback) into a DT::datatable() for the spark columns.
  ## catch case of rownames = FALSE - first spark col does not render
  dots <- lapply(substitute(alist(...))[-1L], eval)
  if (identical(dots$rownames, FALSE))
    dots$rownames <- rep_len('', nrow(data))
  targets <- match(variables, names(data))
  idx <- seq_along(targets)
  ## each column definition and type need a distinct class with variable name
  columnDefs <- lapply(idx, function(ii)
    list(
      targets = targets[ii],
      render = DT::JS(
        sprintf("function(data, type, full){ return '<span class=spark%s>' + data + '</span>' }",
                variables[ii])
      )
    )
  )
  ## per-column sparkline option strings; 'line' and 'box1' additionally
  ## pin the chart range so charts across rows are comparable
  type <- lapply(idx, function(ii) {
    bar <- "type: 'bar' , barColor: 'orange', negBarColor: 'purple', highlightColor: 'black'"
    line <- "type: 'line', lineColor: 'black', fillColor: '#cccccc', highlightLineColor: 'orange', highlightSpotColor: 'orange'"
    box <- "type: 'box' , lineColor: 'black', whiskerColor: 'black' , outlierFillColor: 'black' , outlierLineColor: 'black', medianColor: 'black', boxFillColor: 'orange', boxLineColor: 'black'"
    r <- range[[ii]]
    line_range <- sprintf('%s , chartRangeMin: %s , chartRangeMax: %s',
                          line, r[1L], r[2L])
    box_range <- sprintf('%s , chartRangeMin: %s , chartRangeMax: %s',
                         box, r[1L], r[2L])
    types <- list(bar = bar, line = line_range, box1 = box_range, box2 = box)
    types[match(type[ii], names(types))]
  })
  ## one sparkline() call per spark column, run after each table (re)draw
  js <- sapply(idx, function(ii) sprintf(
    "$('.spark%s:not(:has(canvas))').sparkline('html', { %s }); \n",
    variables[ii], type[[ii]])
  )
  js <- sprintf(
    'function (oSettings, json) {\n %s }\n', paste(js, collapse = '\n')
  )
  ## NOTE(review): modifyList(options, oo) silently overrides any
  ## user-supplied columnDefs / fnDrawCallback present in `options`
  oo <- list(columnDefs = columnDefs, fnDrawCallback = DT::JS(js))
  dt <- do.call(
    DT::datatable,
    c(list(data = data, options = modifyList(options, oo)), dots)
  )
  ## attach the sparkline htmlwidget dependency so the JS actually loads
  dt$dependencies <-
    c(dt$dependencies, htmlwidgets::getDependency('sparkline'))
  dt
}
#' Clear workspace
#'
#' Clear the workspace by removing all objects in \code{\link{ls}} followed
#' by \code{\link[=gc]{garbage collection}}.
#'
#' @param all.names logical; if \code{TRUE}, also removes hidden (dot) objects
#'
#' @seealso
#' \code{\link{clear}}
#'
#' @export
clc <- function(all.names = FALSE) {
  ## Remove every object from the global environment (optionally including
  ## hidden dot-objects) and trigger verbose garbage collection.
  objs <- ls(envir = .GlobalEnv, all.names = all.names)
  rm(list = objs, envir = .GlobalEnv)
  gc(TRUE)
  invisible(NULL)
}
#' Clear console
#'
#' Clear the console window.
#'
#' @param ... ignored
#'
#' @seealso
#' \code{\link{clc}}
#'
#' @export
clear <- function(...) {
  ## Emit the form-feed control character, which RStudio's console
  ## interprets as "clear"; any arguments are ignored.
  form_feed <- '\014'
  cat(form_feed)
}
#' Re-load a package
#'
#' Detach and re-load a package.
#'
#' @param package package name, as a \code{\link{name}} or literal character
#' string
#'
#' @export
reload <- function(package) {
  ## Unload a package's namespace and attach it again; `package` may be
  ## given unquoted (as a name) or as a character string.
  if (!is.character(substitute(package)))
    package <- deparse(substitute(package))
  # tryCatch(
  #   detach(paste0('package:', package), unload = TRUE, character.only = TRUE),
  #   error = function(e) NULL
  # )
  ## returns TRUE/FALSE invisibly; errors are printed rather than thrown
  ok <- tryCatch(
    {unloadNamespace(package); library(package, character.only = TRUE); TRUE},
    error = function(e) {print(e), FALSE}
  )
  invisible(ok)
}
|
## Shiny app exploring the NEISS injury data: pick a product and see
## weighted injury counts by diagnosis, body part and location, plus an
## age/sex line chart.
library(shiny)
library(vroom)
library(tidyverse)
## load the three NEISS tables (injury records, product codes, population)
injuries <- vroom::vroom("neiss/injuries.tsv.gz")
products <- vroom::vroom("neiss/products.tsv")
population <- vroom::vroom("neiss/population.tsv")
ui <- fluidPage(
  fluidRow(
    column(6,
      ## product picker: show titles, return product codes
      selectInput("code", "Product", setNames(products$prod_code, products$title))
    )
  ),
  fluidRow(
    column(4, tableOutput("diag")),
    column(4, tableOutput("body_part")),
    column(4, tableOutput("location"))
  ),
  fluidRow(
    column(12, plotOutput("age_sex"))
  )
)
server <- function(input, output, session) {
  ## injury rows for the chosen product only
  selected <- reactive(injuries %>% filter(prod_code == input$code))
  ## weighted counts (survey weights) sorted descending
  output$diag <- renderTable(
    selected() %>% count(diag, wt = weight, sort = TRUE)
  )
  output$body_part <- renderTable(
    selected() %>% count(body_part, wt = weight, sort = TRUE)
  )
  output$location <- renderTable(
    selected() %>% count(location, wt = weight, sort = TRUE)
  )
  ## injuries per 10,000 people by age and sex
  summary <- reactive({
    selected() %>%
      count(age, sex, wt = weight) %>%
      left_join(population, by = c("age", "sex")) %>%
      mutate(rate = n / population * 1e4)
  })
  output$age_sex <- renderPlot({
    summary() %>%
      ggplot(aes(age, n, colour = sex)) +
      geom_line() +
      labs(y = "Estimated number of injuries") +
      theme_grey(15)
  })
}
shinyApp(ui = ui, server = server) | /problem-set-4-helfrich/app-1.R | no_license | ehelfrich/Visual_Analytics_DSBA-5122 | R | false | false | 1,354 | r | library(shiny)
## Duplicate copy of the NEISS Shiny app above (dataset artifact); the
## corresponding library(shiny) call sits on the preceding junk line.
library(vroom)
library(tidyverse)
## load the three NEISS tables
injuries <- vroom::vroom("neiss/injuries.tsv.gz")
products <- vroom::vroom("neiss/products.tsv")
population <- vroom::vroom("neiss/population.tsv")
ui <- fluidPage(
  fluidRow(
    column(6,
      selectInput("code", "Product", setNames(products$prod_code, products$title))
    )
  ),
  fluidRow(
    column(4, tableOutput("diag")),
    column(4, tableOutput("body_part")),
    column(4, tableOutput("location"))
  ),
  fluidRow(
    column(12, plotOutput("age_sex"))
  )
)
server <- function(input, output, session) {
  ## injury rows for the chosen product only
  selected <- reactive(injuries %>% filter(prod_code == input$code))
  output$diag <- renderTable(
    selected() %>% count(diag, wt = weight, sort = TRUE)
  )
  output$body_part <- renderTable(
    selected() %>% count(body_part, wt = weight, sort = TRUE)
  )
  output$location <- renderTable(
    selected() %>% count(location, wt = weight, sort = TRUE)
  )
  ## injuries per 10,000 people by age and sex
  summary <- reactive({
    selected() %>%
      count(age, sex, wt = weight) %>%
      left_join(population, by = c("age", "sex")) %>%
      mutate(rate = n / population * 1e4)
  })
  output$age_sex <- renderPlot({
    summary() %>%
      ggplot(aes(age, n, colour = sex)) +
      geom_line() +
      labs(y = "Estimated number of injuries") +
      theme_grey(15)
  })
}
shinyApp(ui = ui, server = server) |
## Exploratory checks of known suspicious expense cases in the Brazilian
## parliamentary quota (cota parlamentar) data; each block below reproduces
## an expense pattern reported in the press (source URL in the comment).
require(ggplot2)
require(dplyr)
require(lubridate)
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is risky
gastos <- readRDS(file = 'cota_parlamentar_preprocessado.RDS')
cnpj.features <- readRDS(file='./cnpj_features.RDS')
## join expense records with company (CNPJ) features; derive company age in
## days at the time each receipt was issued
d <- gastos %>%
  left_join(cnpj.features,by = c('cnpj8'='cnpj8')) %>%
  mutate(idade.empresa = difftime(datEmissao,data_inicio_atividade,units = 'days'),
         idade.empresa = as.numeric(idade.empresa),
         txNomeParlamentar = toupper(txNomeParlamentar))
rm(gastos,cnpj.features)
#https://g1.globo.com/distrito-federal/noticia/apos-ser-flagrado-por-app-deputado-devolve-a-camara-r-727-por-13-refeicoes-no-mesmo-dia.ghtml
# Deputy Celso Maldaner submitted 12 receipts for meals consumed on a single
# day at a single establishment, with amounts ranging from R$23.00 to
# R$87.78, totaling R$727.78.
maldener <- d %>%
  filter(txNomeParlamentar=='CELSO MALDANER' &
           datEmissao=='2011-09-05'&
           codTipo=='13.0' &
           txtCNPJCPF=='00984060000277')
## total amount refunded for those receipts
maldener$vlrRestituicao %>% sum(na.rm = T)
#https://medium.com/data-science-brigade/o-que-a-resposta-do-dep-marcon-%C3%A0-rosie-t%C3%AAm-a-nos-dizer-sobre-o-trabalho-da-serenata-de-amor-c7f898a4655f
marcon <- d %>%
  filter(txNomeParlamentar=='MARCON' & datEmissao=='2017-02-26')
#https://www.jornaldocomercio.com/_conteudo/2017/05/politica/562699-parlamentares-federais-gauchos-esclarecem-despesas-fiscalizadas-por-levantamento-eletronico.html
assismelo <- d %>%
  filter(txNomeParlamentar=='ASSIS MELO' & vlrDocumento==103.6)
#https://www.em.com.br/app/noticia/politica/2013/01/13/interna_politica,343003/deputados-utilizam-verba-de-custeio-para-pagar-gastos-de-campanha.shtml
ruicarneiro <- d %>%
  filter(txNomeParlamentar=='RUY CARNEIRO'& numAno=='2012' & codTipo=='15.0')
## NOTE(review): the parliamentarian name below contains a mojibake byte
## (GON�ALVES) inherited from the data's encoding; left as-is on purpose
hiran <- d %>%
  filter(txNomeParlamentar=='HIRAN GON�ALVES' & txtCNPJCPF=='13182427000108')
#https://istoe.com.br/rosa-weber-autoriza-inquerito-contra-utilizacao-irregular-de-cota-parlamentar/
atosdois <- d %>%
  filter(txtCNPJCPF=='13182427000108')
## who (name/party) spent with that CNPJ
atosdois %>% select(txNomeParlamentar,sgPartido) %>% unique()
#https://pnoticias.com.br/noticia/politica/238862-deputados-usam-cota-parlamentar-para-alugar-carros-de-empresas-investigadas-por-fraude
brlocadora <- d %>% filter(txtCNPJCPF=='10644834000193')
#https://www.otempo.com.br/politica/andre-moura-e-outros-29-deputados-sao-investigados-por-fraudes-1.1389605
cloud_technology <- d %>% filter(txtCNPJCPF=='17589509000114')
## collect the expense ids of every case into one labeled table
suspeitos <- bind_rows(
  assismelo %>% select(idGasto) %>% mutate(caso = 'assismelo'),
  atosdois %>% select(idGasto) %>% mutate(caso = 'atosdois'),
  brlocadora %>% select(idGasto) %>% mutate(caso = 'brlocadora'),
  cloud_technology %>% select(idGasto) %>% mutate(caso = 'cloud_technology'),
  hiran %>% select(idGasto) %>% mutate(caso = 'hiran'),
  maldener %>% select(idGasto) %>% mutate(caso = 'maldener'),
  marcon %>% select(idGasto) %>% mutate(caso = 'marcon'),
  ruicarneiro %>% select(idGasto) %>% mutate(caso = 'ruicarneiro')
)
saveRDS(suspeitos,file='./suspeitos.RDS')
## scratch: cumulative IGP-M inflation adjustment (kept commented out)
#
# igpm <- read.table('inflacao.csv',sep = '\t',header = T)
#
# periodo <- igpm[1:166,]
# x <- 1+((periodo$indice)/100)
# total <- 1
# for (i in 1:166){
#   total <- total*x[i]
# }
#
# 7075 * 2.268
# 16047/7075 | /00 exploratory analysis on known cases.R | no_license | Leoteles/cota-parlamentar | R | false | false | 3,281 | r | require(ggplot2)
## Duplicate copy of the known-cases script above (dataset artifact); its
## leading require(ggplot2) sits on the preceding junk line.
require(dplyr)
require(lubridate)
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is risky
gastos <- readRDS(file = 'cota_parlamentar_preprocessado.RDS')
cnpj.features <- readRDS(file='./cnpj_features.RDS')
## join expenses with company features; derive company age in days
d <- gastos %>%
  left_join(cnpj.features,by = c('cnpj8'='cnpj8')) %>%
  mutate(idade.empresa = difftime(datEmissao,data_inicio_atividade,units = 'days'),
         idade.empresa = as.numeric(idade.empresa),
         txNomeParlamentar = toupper(txNomeParlamentar))
rm(gastos,cnpj.features)
#https://g1.globo.com/distrito-federal/noticia/apos-ser-flagrado-por-app-deputado-devolve-a-camara-r-727-por-13-refeicoes-no-mesmo-dia.ghtml
# Deputy Celso Maldaner submitted 12 receipts for meals consumed on a single
# day at a single establishment, totaling R$727.78.
maldener <- d %>%
  filter(txNomeParlamentar=='CELSO MALDANER' &
           datEmissao=='2011-09-05'&
           codTipo=='13.0' &
           txtCNPJCPF=='00984060000277')
maldener$vlrRestituicao %>% sum(na.rm = T)
#https://medium.com/data-science-brigade/o-que-a-resposta-do-dep-marcon-%C3%A0-rosie-t%C3%AAm-a-nos-dizer-sobre-o-trabalho-da-serenata-de-amor-c7f898a4655f
marcon <- d %>%
  filter(txNomeParlamentar=='MARCON' & datEmissao=='2017-02-26')
#https://www.jornaldocomercio.com/_conteudo/2017/05/politica/562699-parlamentares-federais-gauchos-esclarecem-despesas-fiscalizadas-por-levantamento-eletronico.html
assismelo <- d %>%
  filter(txNomeParlamentar=='ASSIS MELO' & vlrDocumento==103.6)
#https://www.em.com.br/app/noticia/politica/2013/01/13/interna_politica,343003/deputados-utilizam-verba-de-custeio-para-pagar-gastos-de-campanha.shtml
ruicarneiro <- d %>%
  filter(txNomeParlamentar=='RUY CARNEIRO'& numAno=='2012' & codTipo=='15.0')
## NOTE(review): mojibake byte in the name string inherited from the data
hiran <- d %>%
  filter(txNomeParlamentar=='HIRAN GON�ALVES' & txtCNPJCPF=='13182427000108')
#https://istoe.com.br/rosa-weber-autoriza-inquerito-contra-utilizacao-irregular-de-cota-parlamentar/
atosdois <- d %>%
  filter(txtCNPJCPF=='13182427000108')
atosdois %>% select(txNomeParlamentar,sgPartido) %>% unique()
#https://pnoticias.com.br/noticia/politica/238862-deputados-usam-cota-parlamentar-para-alugar-carros-de-empresas-investigadas-por-fraude
brlocadora <- d %>% filter(txtCNPJCPF=='10644834000193')
#https://www.otempo.com.br/politica/andre-moura-e-outros-29-deputados-sao-investigados-por-fraudes-1.1389605
cloud_technology <- d %>% filter(txtCNPJCPF=='17589509000114')
## collect the expense ids of every case into one labeled table
suspeitos <- bind_rows(
  assismelo %>% select(idGasto) %>% mutate(caso = 'assismelo'),
  atosdois %>% select(idGasto) %>% mutate(caso = 'atosdois'),
  brlocadora %>% select(idGasto) %>% mutate(caso = 'brlocadora'),
  cloud_technology %>% select(idGasto) %>% mutate(caso = 'cloud_technology'),
  hiran %>% select(idGasto) %>% mutate(caso = 'hiran'),
  maldener %>% select(idGasto) %>% mutate(caso = 'maldener'),
  marcon %>% select(idGasto) %>% mutate(caso = 'marcon'),
  ruicarneiro %>% select(idGasto) %>% mutate(caso = 'ruicarneiro')
)
saveRDS(suspeitos,file='./suspeitos.RDS')
## scratch: cumulative IGP-M inflation adjustment (kept commented out)
#
# igpm <- read.table('inflacao.csv',sep = '\t',header = T)
#
# periodo <- igpm[1:166,]
# x <- 1+((periodo$indice)/100)
# total <- 1
# for (i in 1:166){
#   total <- total*x[i]
# }
#
# 7075 * 2.268
# 16047/7075 |
#' Smooth the traffic volume around one point
#'
#' @param roads an object as returned by \link{get_opentransportmap_data}
#' @param point on element as returned by \link{format_dt_to_points}
#' @param lambda smoothing parameter
#'
#' @return numerical value of the estimated traffic volume
#' @export
smooth_loaction <- function(roads, point, lambda = 0.05) {
  # Distance from the query point to every road feature (units stripped).
  dists <- as.numeric(sf::st_distance(point, roads)[1, ])
  # Exponential distance decay applied to each segment's log traffic volume.
  decay <- exp(-lambda * dists)
  sum(decay * log(roads$trafficvol), na.rm = TRUE)
}
#' Optimize a lambda parameter for the traffic volume
#'
#' @param sensor_data data as returned by \link{get_sensor_measured_values}
#' @param sensors data as returned by \link{get_sensors}
#' @param validation_plot Plot a validation chart? default is TRUE
#' @param lambda_range over which range should lambda be optimized
#' @param mc.cores how many cores should be used for parallelization, default is
#' one core less your maximum number of detected cores.
#' @param roads as returned by \link{get_opentransportmap_data}
#'
#' @return the lambda which maximizes the correlation between the measured
#' PM values and the traffic volume data
#' @export
optim_lambda <- function(sensor_data, sensors, roads, validation_plot = TRUE,
                         lambda_range = seq(0.001, 0.02, length.out = 10),
                         mc.cores = parallel::detectCores() - 1) {
  # Average the measured values per sensor location and attach sensor metadata.
  sensor_data <- sensor_data[, .(value = mean(value)), by = list(locid)]
  sensor_data <- data.table::data.table(merge(sensor_data, sensors,
                                              by.y = 'id', by.x = 'locid', all.y = TRUE))
  sensor_points <- spAirPol:::format_dt_to_points(sensors)
  # Objective: for a given lambda, smooth the traffic volume at every sensor
  # location and return its correlation with the mean measured value.
  optim_me <- function(lambda) {
    ll <- lapply(seq_along(sensor_points), function(x) {
      sum(sapply(roads,
                 function(y) smooth_loaction(y, sensor_points[x], lambda = lambda)))
    })
    data_traffic <- sensors
    data_traffic$trafficvol <- unlist(ll)
    d <- data.table::data.table(dplyr::inner_join(data_traffic, sensor_data))
    print(abs(cor(d$trafficvol, d$value, use = 'pair')))
    cor(d$trafficvol, d$value, use = 'pair')
  }
  # BUG FIX: honour the mc.cores argument; the original ignored it and always
  # used parallel::detectCores() - 1 (the default), so callers could not limit
  # parallelism.
  r <- pbmcapply::pbmclapply(lambda_range, optim_me, mc.cores = mc.cores)
  if (validation_plot) {
    # Correlation as a function of lambda, to eyeball the optimum.
    print(ggplot2::ggplot(data = data.frame(cor = unlist(r), lambda = lambda_range),
                          ggplot2::aes(x = lambda, y = cor)) +
            ggplot2::geom_point() +
            ggplot2::geom_line() +
            ggplot2::theme_classic())
  }
  # Return the lambda with the highest correlation.
  lambda_range[which.max(unlist(r))]
}
| /R/optim_lambda.R | no_license | maxikellerbauer/stAirPol | R | false | false | 2,472 | r | #' Smooth the trafficvolumne around one point
#'
#' @param roads an object as returned by \link{get_opentransportmap_data}
#' @param point on element as returned by \link{format_dt_to_points}
#' @param lambda smoothing parameter
#'
#' @return numerical value of the estimated traffic volume
#' @export
smooth_loaction <- function(roads, point, lambda = 0.05) {
  # Distance from the query point to every road feature (units stripped).
  dists <- as.numeric(sf::st_distance(point, roads)[1, ])
  # Exponential distance decay applied to each segment's log traffic volume.
  decay <- exp(-lambda * dists)
  sum(decay * log(roads$trafficvol), na.rm = TRUE)
}
#' Optimize a lambda parameter for the traffic volume
#'
#' @param sensor_data data as returned by \link{get_sensor_measured_values}
#' @param sensors data as returned by \link{get_sensors}
#' @param validation_plot Plot a validation chart? default is TRUE
#' @param lambda_range over which range should lambda be optimized
#' @param mc.cores how many cores should be used for parallelization, default is
#' one core less your maximum number of detected cores.
#' @param roads as returned by \link{get_opentransportmap_data}
#'
#' @return the lambda which maximizes the correlation between the measured
#' PM values and the traffic volume data
#' @export
optim_lambda <- function(sensor_data, sensors, roads, validation_plot = TRUE,
                         lambda_range = seq(0.001, 0.02, length.out = 10),
                         mc.cores = parallel::detectCores() - 1) {
  # Average the measured values per sensor location and attach sensor metadata.
  sensor_data <- sensor_data[, .(value = mean(value)), by = list(locid)]
  sensor_data <- data.table::data.table(merge(sensor_data, sensors,
                                              by.y = 'id', by.x = 'locid', all.y = TRUE))
  sensor_points <- spAirPol:::format_dt_to_points(sensors)
  # Objective: for a given lambda, smooth the traffic volume at every sensor
  # location and return its correlation with the mean measured value.
  optim_me <- function(lambda) {
    ll <- lapply(seq_along(sensor_points), function(x) {
      sum(sapply(roads,
                 function(y) smooth_loaction(y, sensor_points[x], lambda = lambda)))
    })
    data_traffic <- sensors
    data_traffic$trafficvol <- unlist(ll)
    d <- data.table::data.table(dplyr::inner_join(data_traffic, sensor_data))
    print(abs(cor(d$trafficvol, d$value, use = 'pair')))
    cor(d$trafficvol, d$value, use = 'pair')
  }
  # BUG FIX: honour the mc.cores argument; the original ignored it and always
  # used parallel::detectCores() - 1 (the default), so callers could not limit
  # parallelism.
  r <- pbmcapply::pbmclapply(lambda_range, optim_me, mc.cores = mc.cores)
  if (validation_plot) {
    # Correlation as a function of lambda, to eyeball the optimum.
    print(ggplot2::ggplot(data = data.frame(cor = unlist(r), lambda = lambda_range),
                          ggplot2::aes(x = lambda, y = cor)) +
            ggplot2::geom_point() +
            ggplot2::geom_line() +
            ggplot2::theme_classic())
  }
  # Return the lambda with the highest correlation.
  lambda_range[which.max(unlist(r))]
}
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Generated R6 model class (openapi-generator) for the AEM OSGi "Twitter
# OAuth provider" configuration-info payload.
# Fields: `pid`, `title`, `description` are single character strings;
# `properties` is a nested R6 object of class
# ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties (declared
# elsewhere in this generated client).
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo <- R6::R6Class(
'ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo',
public = list(
`pid` = NULL,
`title` = NULL,
`description` = NULL,
`properties` = NULL,
# All fields are optional: only the arguments actually supplied are
# validated and stored; the rest remain NULL.
initialize = function(`pid`, `title`, `description`, `properties`){
if (!missing(`pid`)) {
stopifnot(is.character(`pid`), length(`pid`) == 1)
self$`pid` <- `pid`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`properties`)) {
stopifnot(R6::is.R6(`properties`))
self$`properties` <- `properties`
}
},
# Serialize to a named list (NULL fields omitted); nested object is
# serialized recursively via its own toJSON().
toJSON = function() {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- list()
if (!is.null(self$`pid`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['pid']] <- self$`pid`
}
if (!is.null(self$`title`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['title']] <- self$`title`
}
if (!is.null(self$`description`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['description']] <- self$`description`
}
if (!is.null(self$`properties`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['properties']] <- self$`properties`$toJSON()
}
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject
},
# Populate this instance in place from a JSON string; only keys present in
# the JSON are assigned.
fromJSON = function(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- jsonlite::fromJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson)
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`)) {
self$`pid` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`)) {
self$`title` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`)) {
self$`description` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`properties`)) {
# Nested object: round-trip through JSON into the generated Properties class.
propertiesObject <- ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties$new()
propertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$properties, auto_unbox = TRUE))
self$`properties` <- propertiesObject
}
},
# NOTE(review): sprintf interpolates the fields unquoted, so the produced
# string is valid JSON only if the character fields already include their
# own quoting — presumably a known quirk of this generator template; verify
# before relying on toJSONString() for machine-readable output.
toJSONString = function() {
sprintf(
'{
"pid": %s,
"title": %s,
"description": %s,
"properties": %s
}',
self$`pid`,
self$`title`,
self$`description`,
self$`properties`$toJSON()
)
},
# Like fromJSON(), but assigns all scalar fields unconditionally (missing
# keys become NULL) and always constructs a fresh Properties object.
fromJSONString = function(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- jsonlite::fromJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson)
self$`pid` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`
self$`title` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`
self$`description` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`
ComAdobeGraniteAuthOauthImplTwitterProviderImplPropertiesObject <- ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties$new()
self$`properties` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplPropertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$properties, auto_unbox = TRUE))
}
)
)
| /clients/r/generated/R/ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo.r | permissive | shinesolutions/swagger-aem-osgi | R | false | false | 4,465 | r | # Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo <- R6::R6Class(
# Generated R6 model class (openapi-generator) for the AEM OSGi "Twitter
# OAuth provider" configuration-info payload.
# Fields: `pid`, `title`, `description` are single character strings;
# `properties` is a nested R6 object of class
# ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties (declared
# elsewhere in this generated client).
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo <- R6::R6Class(
'ComAdobeGraniteAuthOauthImplTwitterProviderImplInfo',
public = list(
`pid` = NULL,
`title` = NULL,
`description` = NULL,
`properties` = NULL,
# All fields are optional: only the arguments actually supplied are
# validated and stored; the rest remain NULL.
initialize = function(`pid`, `title`, `description`, `properties`){
if (!missing(`pid`)) {
stopifnot(is.character(`pid`), length(`pid`) == 1)
self$`pid` <- `pid`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`properties`)) {
stopifnot(R6::is.R6(`properties`))
self$`properties` <- `properties`
}
},
# Serialize to a named list (NULL fields omitted); nested object is
# serialized recursively via its own toJSON().
toJSON = function() {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- list()
if (!is.null(self$`pid`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['pid']] <- self$`pid`
}
if (!is.null(self$`title`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['title']] <- self$`title`
}
if (!is.null(self$`description`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['description']] <- self$`description`
}
if (!is.null(self$`properties`)) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject[['properties']] <- self$`properties`$toJSON()
}
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject
},
# Populate this instance in place from a JSON string; only keys present in
# the JSON are assigned.
fromJSON = function(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- jsonlite::fromJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson)
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`)) {
self$`pid` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`)) {
self$`title` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`)) {
self$`description` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`
}
if (!is.null(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`properties`)) {
# Nested object: round-trip through JSON into the generated Properties class.
propertiesObject <- ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties$new()
propertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$properties, auto_unbox = TRUE))
self$`properties` <- propertiesObject
}
},
# NOTE(review): sprintf interpolates the fields unquoted, so the produced
# string is valid JSON only if the character fields already include their
# own quoting — presumably a known quirk of this generator template; verify
# before relying on toJSONString() for machine-readable output.
toJSONString = function() {
sprintf(
'{
"pid": %s,
"title": %s,
"description": %s,
"properties": %s
}',
self$`pid`,
self$`title`,
self$`description`,
self$`properties`$toJSON()
)
},
# Like fromJSON(), but assigns all scalar fields unconditionally (missing
# keys become NULL) and always constructs a fresh Properties object.
fromJSONString = function(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson) {
ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject <- jsonlite::fromJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoJson)
self$`pid` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`pid`
self$`title` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`title`
self$`description` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$`description`
ComAdobeGraniteAuthOauthImplTwitterProviderImplPropertiesObject <- ComAdobeGraniteAuthOauthImplTwitterProviderImplProperties$new()
self$`properties` <- ComAdobeGraniteAuthOauthImplTwitterProviderImplPropertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteAuthOauthImplTwitterProviderImplInfoObject$properties, auto_unbox = TRUE))
}
)
)
#' A function for creating a wordcloud from a text transcript
#'
#' @param input_path The path of the text file to turn into a wordcloud
#' @param save_path The path where you want the saved image to go. Should end in ".png".
#' @param cloud_width The width of your saved wordcloud image, in pixels. Defaults to 2000.
#' @param cloud_height The height of your saved wordcloud image, in pixels. Defaults to 2000.
#' @param cloud_res The resolution of your saved wordcloud image, in dpi Defaults to 300
#' @param cloud_max_words The maximum number of words you want to plot in your wordcloud. Defaults to 100.
#' @param color_pal The color palette that you want to use for your wordcloud. Choose one color if you want a monochrome wordcloud or a series of colors if you want it to vary the color by the relative frequency of the word. See the wordcloud documentation for details. Defaults to c(brewer.pal(5,"Blues")[3],brewer.pal(5,"Blues")[4],brewer.pal(5,"Blues")[5])
#' @keywords text analysis, wordcloud
#' @export
# Read a plain-text transcript, tokenize it, drop English stop words, and
# render a word cloud to a PNG at save_path. Returns the value of dev.off()
# (invisibly meaningful only as a side-effect function).
# NOTE(review): brewer.pal in the default color_pal comes from RColorBrewer,
# presumably attached via library(wordcloud)'s dependencies — confirm.
transcript_cloud <- function(input_path, save_path, cloud_width=2000, cloud_height=2000, cloud_res = 300, cloud_max_words = 100, color_pal = c(brewer.pal(5,"Blues")[3],brewer.pal(5,"Blues")[4],brewer.pal(5,"Blues")[5])){
# Attaching packages inside the function is a side effect on the session.
library(tidyverse)
library(tidytext)
library(readtext)
library(wordcloud)
# One row per word of the transcript.
transcript <- readtext(input_path)
tidyTranscript <- transcript %>%
unnest_tokens(word, text)
# Loads tidytext's stop_words lexicon (into the global environment) and
# removes those words from the token table.
data("stop_words")
tidyTranscript <- tidyTranscript %>%
anti_join(stop_words)
# Make wordcloud
# NOTE(review): if wordcloud() errors, the PNG device is left open —
# consider on.exit(dev.off()).
png(save_path, width = cloud_width, height = cloud_height, res = cloud_res)
tidyTranscript %>%
count(word, sort = TRUE) %>%
with(wordcloud(word, n, max.words = cloud_max_words, random.order = FALSE, colors = color_pal))
dev.off()
} | /R/transcript_cloud.R | no_license | jscarlton/jscTools | R | false | false | 1,788 | r | #' A function for creating a wordcloud from a text transcript
#'
#' @param input_path The path of the text file to turn into a wordcloud
#' @param save_path The path where you want the saved image to go. Should end in ".png".
#' @param cloud_width The width of your saved wordcloud image, in pixels. Defaults to 2000.
#' @param cloud_height The height of your saved wordcloud image, in pixels. Defaults to 2000.
#' @param cloud_res The resolution of your saved wordcloud image, in dpi Defaults to 300
#' @param cloud_max_words The maximum number of words you want to plot in your wordcloud. Defaults to 100.
#' @param color_pal The color palette that you want to use for your wordcloud. Choose one color if you want a monochrome wordcloud or a series of colors if you want it to vary the color by the relative frequency of the word. See the wordcloud documentation for details. Defaults to c(brewer.pal(5,"Blues")[3],brewer.pal(5,"Blues")[4],brewer.pal(5,"Blues")[5])
#' @keywords text analysis, wordcloud
#' @export
transcript_cloud <- function(input_path, save_path, cloud_width=2000, cloud_height=2000, cloud_res = 300, cloud_max_words = 100, color_pal = c(brewer.pal(5,"Blues")[3],brewer.pal(5,"Blues")[4],brewer.pal(5,"Blues")[5])){
  # Read a plain-text transcript, tokenize it, drop English stop words, and
  # render a word cloud to a PNG at save_path. Invisibly returns save_path.
  # NOTE: attaching packages inside a function is a session side effect; kept
  # for backward compatibility with standalone use of this helper.
  library(tidyverse)
  library(tidytext)
  library(readtext)
  library(wordcloud)
  # One row per word of the transcript.
  transcript <- readtext(input_path)
  tidyTranscript <- transcript %>%
    unnest_tokens(word, text)
  # Load the stop-word lexicon into this function's environment (the original
  # data("stop_words") wrote it to the global environment) and remove those
  # words; the join key is made explicit to avoid the auto-join message.
  data("stop_words", package = "tidytext", envir = environment())
  tidyTranscript <- tidyTranscript %>%
    anti_join(stop_words, by = "word")
  # Render the word cloud. on.exit() guarantees the PNG device is closed even
  # if wordcloud() errors (the original leaked the open device on error).
  png(save_path, width = cloud_width, height = cloud_height, res = cloud_res)
  on.exit(dev.off(), add = TRUE)
  tidyTranscript %>%
    count(word, sort = TRUE) %>%
    with(wordcloud(word, n, max.words = cloud_max_words, random.order = FALSE, colors = color_pal))
  invisible(save_path)
}
# makes the random forest submission
# Kaggle digit-recognizer benchmark: fit a random forest on the training
# data (first column = label, remaining columns = pixel features) and write
# one predicted label per line for the test set.
library(randomForest)
train <- read.csv("data/train.csv", header=TRUE)
test <- read.csv("data/test.csv", header=TRUE)
# First column is the digit label; the rest are features.
labels <- as.factor(train[,1])
train <- train[,-1]
# Passing xtest scores the test set during fitting; predictions land in
# rf$test$predicted.
rf <- randomForest(train, labels, xtest=test, ntree=1000)
# Map factor codes back to the original label strings.
predictions <- levels(labels)[rf$test$predicted]
write(predictions, file="rf_benchmark.csv", ncolumns=1)
| /Digit Recognizer/rf_benchmark.R | no_license | cwbishop/Kaggle | R | false | false | 375 | r | # makes the random forest submission
library(randomForest)
train <- read.csv("data/train.csv", header=TRUE)
test <- read.csv("data/test.csv", header=TRUE)
labels <- as.factor(train[,1])
train <- train[,-1]
rf <- randomForest(train, labels, xtest=test, ntree=1000)
predictions <- levels(labels)[rf$test$predicted]
write(predictions, file="rf_benchmark.csv", ncolumns=1)
|
## archivist package for R
##
#' @title Search for an Artifact in the Repository Using Tags
#'
#' @description
#' \code{searchInRepo} searches for an artifact in the \link{Repository} using it's \link{Tags}.
#' To learn more about artifacts visit \link[archivist]{archivist-package}.
#'
#'
#' @details
#' \code{searchInRepo} searches for an artifact in the Repository using it's \code{Tag}
#' (e.g., \code{name}, \code{class} or \code{archiving date}). \code{Tags} are used in a \code{pattern}
#' parameter. For various artifact classes different \code{Tags} can be searched for.
#' See \link{Tags}. If a \code{pattern} is a list of length 2 then \code{md5hashes} of all
#' artifacts created from date \code{dateFrom} to date \code{dateTo} are returned. The date
#' should be formatted according to the YYYY-MM-DD format, e.g., \code{"2014-07-31"}.
#'
#' \code{Tags}, used in a \code{pattern} parameter, should be determined according to the
#' format: \code{"TagKey:TagValue"} - see examples.
#'
#' @return
#' \code{searchInRepo} returns character vector of \code{md5hashes} of artifacts that were searched for.
#' Those are hashes assigned to artifacts while they were saved in the Repository
#' by the \link{saveToLocalRepo} function. If the artifact
#' is not in the Repository then a logical value \code{FALSE} is returned.
#'
#' @param repoType A character containing a type of the remote repository. Currently it can be 'github' or 'bitbucket'.
#'
#' @param pattern If \code{fixed = TRUE}: a character denoting a \code{Tag} which is to be searched for in the Repository.
#' It is also possible to specify \code{pattern} as a list of
#' length 2 with \code{dateFrom} and \code{dateTo}; see details. If \code{fixed = FALSE}: a regular expression
#' specifying the beginning of a \code{Tag}, which will be used to search for artifacts. If of length more than one and if
#' \code{intersect = TRUE} then artifacts that match all conditions are returned. If \code{intersect = FALSE} then artifacts that match any condition
#' are returned. See examples.
#'
#' @param intersect A logical value. Used only when \code{length(pattern) > 1 & is.character(pattern)}.
#' See \code{pattern} for more details.
#'
#' @param repoDir A character denoting an existing directory in which artifacts will be searched for.
#'
#' @param repo While working with the Remote repository. A character containing a name of the Remote repository on which the Repository is stored.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param user While working with the Remote repository. A character containing a name of the Remote user on whose account the \code{repo} is created.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param branch While working with the Remote repository. A character containing a name of
#' the Remote Repository's branch on which the Repository is stored. Default \code{branch} is \code{master}.
#'
#' @param fixed A logical value specifying how \code{artifacts} should be searched for.
#' If \code{fixed = TRUE} (default) then artifacts are searched for by using \code{pattern = "Tag"} argument.
#' If \code{fixed = FALSE} then artifacts are searched for by using \code{pattern = "regular expression"} argument.
#' The latter is wider and more flexible method, e.g.,
#' using \code{pattern = "name", fixed = FALSE} arguments enables to search for all artifacts in the \code{Repository}.
#'
#' @param subdir While working with the Remote repository. A character containing a name of a directory on the Remote repository
#' on which the Repository is stored. If the Repository is stored in the main folder of the Remote repository, this should be set
#' to \code{subdir = "/"} as default.
#'
#' @param ... Used for old deprecated functions.
#'
#' @note
#' If \code{repo}, \code{user}, \code{subdir} and \code{repoType} are not specified in the Remote mode then global parameters
#' set in \link{setRemoteRepo} function are used.
#'
#' Bug reports and feature requests can be sent to \href{https://github.com/pbiecek/archivist/issues}{https://github.com/pbiecek/archivist/issues}
#'
#' @author
#' Marcin Kosinski, \email{m.p.kosinski@@gmail.com}
#'
#' @examples
#' \dontrun{
#' # objects preparation
#'
#' showLocalRepo(method = "md5hashes",
#' repoDir = system.file("graphGallery", package = "archivist"))
#' showLocalRepo(method = "tags",
#' repoDir = system.file("graphGallery", package = "archivist"))
#'
#' # Tag search, fixed version
#' searchInLocalRepo( "class:ggplot", repoDir = exampleRepoDir )
#' searchInLocalRepo( "name:", repoDir = exampleRepoDir )
#' # Tag search, regex version
#' searchInLocalRepo( "class", repoDir = exampleRepoDir, fixed = FALSE )
#'
#' # Github version
#' # check the state of the Repository
#' summaryRemoteRepo( user="pbiecek", repo="archivist" )
#' showRemoteRepo( user="pbiecek", repo="archivist" )
#' showRemoteRepo( user="pbiecek", repo="archivist", method = "tags" )
#' # Tag search, fixed version
#' searchInRemoteRepo( "varname:Sepal.Width", user="pbiecek", repo="archivist" )
#' searchInRemoteRepo( "class:lm", user="pbiecek", repo="archivist", branch="master" )
#' searchInRemoteRepo( "name:myplot123", user="pbiecek", repo="archivist" )
#'
#' # Tag search, regex version
#' searchInRemoteRepo( "class", user="pbiecek", repo="archivist", fixed = FALSE )
#' searchInRemoteRepo( "name", user="pbiecek", repo="archivist", fixed = FALSE )
#'
#' # also on Github
#'
#' # Remember to set dateTo parameter to actual date because sometimes we update datasets.
#' searchInRemoteRepo( pattern = list( dateFrom = "2015-10-01", dateTo = "2015-11-30" ),
#' user="pbiecek", repo="archivist", branch="master" )
#'
#'
#' # many archivist-like Repositories on one Remote repository
#'
#' searchInRemoteRepo( pattern = "name", user="MarcinKosinski", repo="Museum",
#' branch="master", subdir="ex1", fixed = FALSE )
#'
#' searchInRemoteRepo( pattern = "name", user="MarcinKosinski", repo="Museum",
#' branch="master", subdir="ex2", fixed = FALSE )
#'
#' # multi versions
#' searchInRemoteRepo( pattern=c("varname:Sepal.Width", "class:lm", "name:myplot123"),
#' user="pbiecek", repo="archivist", intersect = FALSE )
#'
#' }
#' @family archivist
#' @rdname searchInRepo
#' @export
searchInLocalRepo <- function(pattern, repoDir = aoptions("repoDir"), fixed = TRUE, intersect = TRUE) {
  # Argument validation up front.
  stopifnot(is.null(repoDir) || (is.character(repoDir) && length(repoDir) == 1))
  stopifnot(is.logical(c(fixed, intersect)), length(fixed) == 1, length(intersect) == 1)
  stopifnot(is.character(pattern) || (is.list(pattern) && length(pattern) == 2))
  # Several character patterns: delegate to the multi-pattern helper.
  if (is.character(pattern) && length(pattern) > 1) {
    return(multiSearchInLocalRepoInternal(patterns = pattern, repoDir = repoDir,
                                          fixed = fixed, intersect = intersect))
  }
  repoDir <- checkDirectory(repoDir)
  # Build the SQL for one of the three query modes:
  # prefix match (fixed = FALSE), exact tag, or creation-date interval.
  if (!fixed) {
    query <- paste0("SELECT DISTINCT artifact FROM tag WHERE tag LIKE ",
                    "'", pattern, "%'")
  } else if (length(pattern) == 1) {
    query <- paste0("SELECT DISTINCT artifact FROM tag WHERE tag = ",
                    "'", pattern, "'")
  } else {
    # pattern is list(dateFrom, dateTo); widen by one day on each side so the
    # comparison operators behave inclusively.
    query <- paste0("SELECT DISTINCT artifact FROM tag WHERE createdDate >",
                    "'", as.Date(pattern[[1]]) - 1, "'", " AND createdDate <",
                    "'", as.Date(pattern[[2]]) + 1, "'")
  }
  md5hashes <- unique(executeSingleQuery(dir = repoDir, query))
  as.character(md5hashes[, 1])
}
#' @rdname searchInRepo
#' @export
searchInRemoteRepo <- function( pattern, repo = aoptions("repo"), user = aoptions("user"), branch = "master", subdir = aoptions("subdir"),
                                repoType = aoptions("repoType"), fixed = TRUE, intersect = TRUE ){
  # Validate arguments, then delegate multi-pattern searches to the helper.
  stopifnot( (is.list( pattern ) & length( pattern ) == 2 ) | is.character( pattern ) )
  stopifnot( is.logical( c( fixed, intersect ) ), length( fixed ) == 1, length( intersect ) == 1 )
  if ( is.character( pattern ) & length( pattern ) > 1 ) {
    return(multiSearchInRemoteRepoInternal(patterns = pattern, repo = repo, user = user, branch = branch,
                                           subdir = subdir, repoType = repoType, fixed = fixed, intersect = intersect))
  }
  RemoteRepoCheck( repo, user, branch, subdir, repoType) # implemented in setRepo.R
  # Download the remote SQLite database into a temporary directory.
  # BUG FIX: register the cleanup with on.exit() so the temp directory is
  # removed even when a query below errors (the original only unlinked it on
  # the success path, leaking it on error); this also matches
  # multiSearchInRemoteRepoInternal's behaviour.
  remoteHook <- getRemoteHook(repo=repo, user=user, branch=branch, subdir=subdir)
  Temp <- downloadDB( remoteHook )
  on.exit( unlink( Temp, recursive = TRUE, force = TRUE ), add = TRUE )
  # Three query modes: exact tag, creation-date interval, or prefix match.
  if ( fixed ){
    if ( length( pattern ) == 1 ){
      md5hashES <- unique( executeSingleQuery( dir = Temp,
                    paste0( "SELECT artifact FROM tag WHERE tag = ",
                            "'", pattern, "'" ) ) )
    }else{
      # length pattern == 2: list(dateFrom, dateTo), widened by one day so the
      # strict comparisons behave inclusively.
      md5hashES <- unique( executeSingleQuery( dir = Temp,
                    paste0( "SELECT artifact FROM tag WHERE createdDate >",
                            "'", as.Date(pattern[[1]])-1, "'", " AND createdDate <",
                            "'", as.Date(pattern[[2]])+1, "'") ) ) }
  }else{
    # fixed FALSE: prefix match on the tag text.
    md5hashES <- unique( executeSingleQuery( dir = Temp,
                    paste0( "SELECT DISTINCT artifact FROM tag WHERE tag LIKE ",
                            "'", pattern, "%'" ) ) )
  }
  return( as.character( md5hashES[, 1] ) )
}
multiSearchInLocalRepoInternal <- function(patterns, repoDir = aoptions("repoDir"), fixed = TRUE, intersect = TRUE) {
  # Run the single-pattern search once per pattern; each result set is
  # de-duplicated so the intersection count below is reliable.
  hits_per_pattern <- lapply(patterns, function(p) {
    unique(searchInLocalRepo(p, repoDir = repoDir, fixed = fixed))
  })
  if (intersect) {
    # Keep only md5hashes that occur in every per-pattern result set.
    hit_counts <- table(unlist(hits_per_pattern))
    return(names(which(hit_counts == length(hits_per_pattern))))
  }
  # Otherwise return the union of all matches.
  unique(unlist(hits_per_pattern))
}
multiSearchInRemoteRepoInternal <- function(patterns, repo = aoptions("repo"), user = aoptions("user"),
                                            branch = "master", subdir = aoptions("subdir"),
                                            repoType = aoptions("repoType"),
                                            fixed = TRUE, intersect = TRUE) {
  RemoteRepoCheck(repo, user, branch, subdir, repoType) # implemented in setRepo.R
  # Fetch the remote database once, run the local multi-pattern search on it,
  # and always remove the temporary copy afterwards.
  remoteHook <- getRemoteHook(repo = repo, user = user, branch = branch, subdir = subdir)
  tmp_db_dir <- downloadDB(remoteHook)
  on.exit(unlink(tmp_db_dir, recursive = TRUE, force = TRUE))
  multiSearchInLocalRepoInternal(patterns, repoDir = tmp_db_dir,
                                 fixed = fixed, intersect = intersect)
}
#' @family archivist
#' @rdname searchInRepo
#' @export
multiSearchInLocalRepo <- function(...) {
  # Deprecated alias: forwards all arguments to the internal implementation.
  # BUG FIX: the old code passed the whole sentence as .Deprecated()'s `new`
  # argument (interpreted as the replacement function's *name*), which garbled
  # the emitted message; supply the text via `msg` instead.
  .Deprecated("searchInLocalRepo",
              msg = "multiSearchInLocalRepo is deprecated. Use searchInLocalRepo() instead.")
  multiSearchInLocalRepoInternal(...)
}
#' @family archivist
#' @rdname searchInRepo
#' @export
multiSearchInRemoteRepo <- function(...) {
  # Deprecated alias: forwards all arguments to the internal implementation.
  # BUG FIX: the old code passed the whole sentence as .Deprecated()'s `new`
  # argument (interpreted as the replacement function's *name*), which garbled
  # the emitted message; supply the text via `msg` instead.
  .Deprecated("searchInRemoteRepo",
              msg = "multiSearchInRemoteRepo is deprecated. Use searchInRemoteRepo() instead.")
  multiSearchInRemoteRepoInternal(...)
}
| /R/searchInRepo.R | no_license | cha63506/archivist-3 | R | false | false | 11,702 | r | ## archivist package for R
##
#' @title Search for an Artifact in the Repository Using Tags
#'
#' @description
#' \code{searchInRepo} searches for an artifact in the \link{Repository} using it's \link{Tags}.
#' To learn more about artifacts visit \link[archivist]{archivist-package}.
#'
#'
#' @details
#' \code{searchInRepo} searches for an artifact in the Repository using it's \code{Tag}
#' (e.g., \code{name}, \code{class} or \code{archiving date}). \code{Tags} are used in a \code{pattern}
#' parameter. For various artifact classes different \code{Tags} can be searched for.
#' See \link{Tags}. If a \code{pattern} is a list of length 2 then \code{md5hashes} of all
#' artifacts created from date \code{dateFrom} to date \code{dateTo} are returned. The date
#' should be formatted according to the YYYY-MM-DD format, e.g., \code{"2014-07-31"}.
#'
#' \code{Tags}, used in a \code{pattern} parameter, should be determined according to the
#' format: \code{"TagKey:TagValue"} - see examples.
#'
#' @return
#' \code{searchInRepo} returns character vector of \code{md5hashes} of artifacts that were searched for.
#' Those are hashes assigned to artifacts while they were saved in the Repository
#' by the \link{saveToLocalRepo} function. If the artifact
#' is not in the Repository then a logical value \code{FALSE} is returned.
#'
#' @param repoType A character containing a type of the remote repository. Currently it can be 'github' or 'bitbucket'.
#'
#' @param pattern If \code{fixed = TRUE}: a character denoting a \code{Tag} which is to be searched for in the Repository.
#' It is also possible to specify \code{pattern} as a list of
#' length 2 with \code{dateFrom} and \code{dateTo}; see details. If \code{fixed = FALSE}: a regular expression
#' specifying the beginning of a \code{Tag}, which will be used to search for artifacts. If of length more than one and if
#' \code{intersect = TRUE} then artifacts that match all conditions are returned. If \code{intersect = FALSE} then artifacts that match any condition
#' are returned. See examples.
#'
#' @param intersect A logical value. Used only when \code{length(pattern) > 1 & is.character(pattern)}.
#' See \code{pattern} for more details.
#'
#' @param repoDir A character denoting an existing directory in which artifacts will be searched for.
#'
#' @param repo While working with the Remote repository. A character containing a name of the Remote repository on which the Repository is stored.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param user While working with the Remote repository. A character containing a name of the Remote user on whose account the \code{repo} is created.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param branch While working with the Remote repository. A character containing a name of
#' the Remote Repository's branch on which the Repository is stored. Default \code{branch} is \code{master}.
#'
#' @param fixed A logical value specifying how \code{artifacts} should be searched for.
#' If \code{fixed = TRUE} (default) then artifacts are searched for by using \code{pattern = "Tag"} argument.
#' If \code{fixed = FALSE} then artifacts are searched for by using \code{pattern = "regular expression"} argument.
#' The latter is wider and more flexible method, e.g.,
#' using \code{pattern = "name", fixed = FALSE} arguments enables to search for all artifacts in the \code{Repository}.
#'
#' @param subdir While working with the Remote repository. A character containing a name of a directory on the Remote repository
#' on which the Repository is stored. If the Repository is stored in the main folder of the Remote repository, this should be set
#' to \code{subdir = "/"} as default.
#'
#' @param ... Used for old deprecated functions.
#'
#' @note
#' If \code{repo}, \code{user}, \code{subdir} and \code{repoType} are not specified in the Remote mode then global parameters
#' set in \link{setRemoteRepo} function are used.
#'
#' Bug reports and feature requests can be sent to \href{https://github.com/pbiecek/archivist/issues}{https://github.com/pbiecek/archivist/issues}
#'
#' @author
#' Marcin Kosinski, \email{m.p.kosinski@@gmail.com}
#'
#' @examples
#' \dontrun{
#' # objects preparation
#'
#' showLocalRepo(method = "md5hashes",
#' repoDir = system.file("graphGallery", package = "archivist"))
#' showLocalRepo(method = "tags",
#' repoDir = system.file("graphGallery", package = "archivist"))
#'
#' # Tag search, fixed version
#' searchInLocalRepo( "class:ggplot", repoDir = exampleRepoDir )
#' searchInLocalRepo( "name:", repoDir = exampleRepoDir )
#' # Tag search, regex version
#' searchInLocalRepo( "class", repoDir = exampleRepoDir, fixed = FALSE )
#'
#' # Github version
#' # check the state of the Repository
#' summaryRemoteRepo( user="pbiecek", repo="archivist" )
#' showRemoteRepo( user="pbiecek", repo="archivist" )
#' showRemoteRepo( user="pbiecek", repo="archivist", method = "tags" )
#' # Tag search, fixed version
#' searchInRemoteRepo( "varname:Sepal.Width", user="pbiecek", repo="archivist" )
#' searchInRemoteRepo( "class:lm", user="pbiecek", repo="archivist", branch="master" )
#' searchInRemoteRepo( "name:myplot123", user="pbiecek", repo="archivist" )
#'
#' # Tag search, regex version
#' searchInRemoteRepo( "class", user="pbiecek", repo="archivist", fixed = FALSE )
#' searchInRemoteRepo( "name", user="pbiecek", repo="archivist", fixed = FALSE )
#'
#' # also on Github
#'
#' # Remember to set dateTo parameter to actual date because sometimes we update datasets.
#' searchInRemoteRepo( pattern = list( dateFrom = "2015-10-01", dateTo = "2015-11-30" ),
#' user="pbiecek", repo="archivist", branch="master" )
#'
#'
#' # many archivist-like Repositories on one Remote repository
#'
#' searchInRemoteRepo( pattern = "name", user="MarcinKosinski", repo="Museum",
#' branch="master", subdir="ex1", fixed = FALSE )
#'
#' searchInRemoteRepo( pattern = "name", user="MarcinKosinski", repo="Museum",
#' branch="master", subdir="ex2", fixed = FALSE )
#'
#' # multi versions
#' searchInRemoteRepo( pattern=c("varname:Sepal.Width", "class:lm", "name:myplot123"),
#' user="pbiecek", repo="archivist", intersect = FALSE )
#'
#' }
#' @family archivist
#' @rdname searchInRepo
#' @export
searchInLocalRepo <- function( pattern, repoDir = aoptions("repoDir"), fixed = TRUE, intersect = TRUE ){
  # Validate arguments. Scalar conditions use the short-circuiting && / ||
  # operators instead of the elementwise & / | used previously.
  stopifnot( ( is.character( repoDir ) && length( repoDir ) == 1 ) || is.null( repoDir ) )
  stopifnot( is.logical( c( fixed, intersect ) ), length( fixed ) == 1, length( intersect ) == 1 )
  stopifnot( is.character( pattern ) || (is.list( pattern ) && length( pattern ) == 2) )
  # More than one character pattern: delegate to the multi-pattern search,
  # which combines per-pattern results (intersection or union).
  if ( is.character( pattern ) && length( pattern ) > 1 ) {
    return(multiSearchInLocalRepoInternal(patterns = pattern, repoDir = repoDir, fixed = fixed, intersect = intersect))
  }
  repoDir <- checkDirectory( repoDir )
  # Extract the md5hashes of the matching artifacts from the backpack database.
  if ( fixed ){
    if ( length( pattern ) == 1 ){
      # Exact Tag match.
      md5hashES <- unique( executeSingleQuery( dir = repoDir,
                  paste0( "SELECT DISTINCT artifact FROM tag WHERE tag = ",
                          "'", pattern, "'" ) ) )
    }else{
      # length(pattern) == 2: a list(dateFrom, dateTo). The bounds are widened
      # by one day so that the (inclusive) interval is matched with strict
      # comparisons.
      md5hashES <- unique( executeSingleQuery( dir = repoDir,
                  paste0( "SELECT DISTINCT artifact FROM tag WHERE createdDate >",
                          "'", as.Date(pattern[[1]])-1, "'", " AND createdDate <",
                          "'", as.Date(pattern[[2]])+1, "'") ) ) }
  }else{
    # fixed = FALSE: pattern is treated as a Tag prefix (SQL LIKE 'pattern%').
    md5hashES <- unique( executeSingleQuery( dir = repoDir,
                paste0( "SELECT DISTINCT artifact FROM tag WHERE tag LIKE ",
                        "'", pattern, "%'" ) ) )
  }
  return( as.character( md5hashES[, 1] ) )
}
#' @rdname searchInRepo
#' @export
searchInRemoteRepo <- function( pattern, repo = aoptions("repo"), user = aoptions("user"), branch = "master", subdir = aoptions("subdir"),
                                repoType = aoptions("repoType"), fixed = TRUE, intersect = TRUE ){
  # Validate arguments (scalar conditions with short-circuiting operators).
  stopifnot( (is.list( pattern ) && length( pattern ) == 2 ) || is.character( pattern ) )
  stopifnot( is.logical( c( fixed, intersect ) ), length( fixed ) == 1, length( intersect ) == 1 )
  # More than one character pattern: delegate to the multi-pattern search.
  if ( is.character( pattern ) && length( pattern ) > 1 ) {
    return(multiSearchInRemoteRepoInternal(patterns = pattern, repo = repo, user = user, branch = branch,
                                           subdir = subdir, repoType = repoType, fixed = fixed, intersect = intersect))
  }
  RemoteRepoCheck( repo, user, branch, subdir, repoType) # implemented in setRepo.R
  # Download the backpack database once; the temporary copy is removed via
  # on.exit() so it is cleaned up even when a query fails (the previous
  # version only unlinked it right before returning, leaking it on error).
  remoteHook <- getRemoteHook(repo=repo, user=user, branch=branch, subdir=subdir)
  Temp <- downloadDB( remoteHook )
  on.exit( unlink( Temp, recursive = TRUE, force = TRUE) )
  # Extract md5hashes. Queries are kept consistent with searchInLocalRepo():
  # the added DISTINCT changes nothing observable since results were already
  # wrapped in unique().
  if ( fixed ){
    if ( length( pattern ) == 1 ){
      # Exact Tag match.
      md5hashES <- unique( executeSingleQuery( dir = Temp,
                  paste0( "SELECT DISTINCT artifact FROM tag WHERE tag = ",
                          "'", pattern, "'" ) ) )
    }else{
      # length(pattern) == 2: a list(dateFrom, dateTo) date interval,
      # widened by one day on each side to match inclusively.
      md5hashES <- unique( executeSingleQuery( dir = Temp,
                  paste0( "SELECT DISTINCT artifact FROM tag WHERE createdDate >",
                          "'", as.Date(pattern[[1]])-1, "'", " AND createdDate <",
                          "'", as.Date(pattern[[2]])+1, "'") ) ) }
  }else{
    # fixed = FALSE: pattern is treated as a Tag prefix (SQL LIKE 'pattern%').
    md5hashES <- unique( executeSingleQuery( dir = Temp,
                paste0( "SELECT DISTINCT artifact FROM tag WHERE tag LIKE ",
                        "'", pattern, "%'" ) ) )
  }
  return( as.character( md5hashES[, 1] ) )
}
multiSearchInLocalRepoInternal <- function( patterns, repoDir = aoptions("repoDir"), fixed = TRUE, intersect = TRUE ){
  # Run one single-pattern search per pattern; each result is deduplicated.
  per_pattern <- lapply(patterns, function(p) {
    unique(searchInLocalRepo(p, repoDir = repoDir, fixed = fixed))
  })
  all_hashes <- unlist(per_pattern)
  if (!intersect) {
    # Union: artifacts matched by at least one pattern.
    return(unique(all_hashes))
  }
  # Intersection: an artifact matching every pattern appears exactly once in
  # each per-pattern result, i.e. length(per_pattern) times overall.
  counts <- table(all_hashes)
  names(which(counts == length(per_pattern)))
}
# Multi-pattern variant of searchInRemoteRepo(): the remote backpack database
# is downloaded once and the multi-pattern local search is run against the
# local copy.
multiSearchInRemoteRepoInternal <- function( patterns, repo = aoptions("repo"), user = aoptions("user"), branch = "master", subdir = aoptions("subdir"),
                                repoType = aoptions("repoType"),
                                fixed = TRUE, intersect = TRUE ){
  RemoteRepoCheck( repo, user, branch, subdir, repoType) # implemented in setRepo.R
  remoteHook <- getRemoteHook(repo=repo, user=user, branch=branch, subdir=subdir)
  Temp <- downloadDB( remoteHook )
  # Remove the temporary copy of the database even if the search fails.
  on.exit( unlink( Temp, recursive = TRUE, force = TRUE))
  m <- multiSearchInLocalRepoInternal( patterns, repoDir = Temp, fixed=fixed,
                               intersect=intersect)
  return( m )
}
#' @family archivist
#' @rdname searchInRepo
#' @export
# Deprecated entry point kept for backward compatibility; forwards to the
# internal implementation used by searchInLocalRepo().
multiSearchInLocalRepo <- function(...) {
  # .Deprecated()'s first argument is the *name* of the replacement function,
  # not a full sentence; passing the message there produced a garbled
  # "Use '... is deprecated ...' instead." warning. Supply the text via `msg`.
  .Deprecated("searchInLocalRepo",
              msg = "multiSearchInLocalRepo is deprecated. Use searchInLocalRepo() instead.")
  multiSearchInLocalRepoInternal(...)
}
#' @family archivist
#' @rdname searchInRepo
#' @export
# Deprecated entry point kept for backward compatibility; forwards to the
# internal implementation used by searchInRemoteRepo().
multiSearchInRemoteRepo <- function(...) {
  # .Deprecated()'s first argument is the *name* of the replacement function,
  # not a full sentence (see multiSearchInLocalRepo); pass the text via `msg`.
  .Deprecated("searchInRemoteRepo",
              msg = "multiSearchInRemoteRepo is deprecated. Use searchInRemoteRepo() instead.")
  multiSearchInRemoteRepoInternal(...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emr_operations.R
\name{add_emr_job_type}
\alias{add_emr_job_type}
\title{add_emr_job_type}
\usage{
add_emr_job_type(df, position_number_col_name = "Position Number",
suffix_col_name = "Suffix", mus_col_name = "MUS",
ecls_col_name = "PEAEMPL ECLS")
}
\arguments{
\item{df}{the dataframe containing the necessary columns and to which the new
column will be appended.}
\item{position_number_col_name}{the string containing the name of the
Position Number column}
\item{suffix_col_name}{the string containing the name of the
Suffix column}
\item{mus_col_name}{the string containing the name of the
MUS Contract Indicator column}
\item{ecls_col_name}{the string containing the name of the
PEAEMPL ECLS employee class column}
}
\value{
the original dataframe with a newly appended \code{EMRJobType} column
}
\description{
EMR Job Types are aggregated groups of employee classes. These are commonly
used as grouping variables for analyses. Job type derived from Position #,
Suffix, and MUS Contract indicator.
}
| /man/add_emr_job_type.Rd | no_license | iancj88/msuopa | R | false | true | 995 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emr_operations.R
\name{add_emr_job_type}
\alias{add_emr_job_type}
\title{add_emr_job_type}
\usage{
add_emr_job_type(df, position_number_col_name = "Position Number",
suffix_col_name = "Suffix", mus_col_name = "MUS",
ecls_col_name = "PEAEMPL ECLS")
}
\arguments{
\item{df}{the dataframe containing the necessary columns and to which the new
column will be appended.}
\item{position_number_col_name}{the string containing the name of the
Position Number column}
\item{suffix_col_name}{the string containing the name of the
Suffix column}
\item{mus_col_name}{the string containing the name of the
MUS Contract Indicator column}
\item{ecls_col_name}{the string containing the name of the
PEAEMPL ECLS employee class column}
}
\value{
the original dataframe with a newly appended \code{EMRJobType} column
}
\description{
EMR Job Types are aggregated groups of employee classes. These are commonly
used as grouping variables for analyses. Job type derived from Position #,
Suffix, and MUS Contract indicator.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reflections.R
\name{writeMTZ}
\alias{writeMTZ}
\title{Write data to an MTZ file}
\usage{
writeMTZ(reflections, header, filename, title = NULL, batch_header = NULL)
}
\arguments{
\item{reflections}{A data frame containing all reflection
records in columns. This is usually derived
from modifications of a previously existing
data frame obtained using
\code{\link{readMTZ}}.}
\item{header}{A list whose components are other R objects. This
is normally derived from the reading of another
MTZ file using \code{\link{readMTZ}}. See further
details at \code{\link{readMTZHeader}}.}
\item{filename}{A character string. The path to a valid mtz
file. If a file with the same name exists, it
will be deleted.}
\item{title}{A character string. The character string
associated with the TITLE keyword in an MTZ file.
This feature makes it easy to quickly identify the
data file in \href{https://www.ccp4.ac.uk}{CCP4}
programs. Default (NULL) is for the output file
to have the same title as the input file.}
\item{batch_header}{A named list including information at data
collection time. This component is present
only for raw (unmerged) intensity data
produce after the diffraction images
integration. Merged MTZ reflection files
have \code{batch_header=NULL}.
Names and types depend on
the type of experiment (more information
on this can be found at
\href{https://www.ccp4.ac.uk}{CCP4}.)}
}
\value{
This function does not return any R object. It outputs
an MTZ reflection file to some target location.
}
\description{
Write reflections and experimental information
to an MTZ file
}
\examples{
# Read the 1dei_phases data included in the package
datadir <- system.file("extdata",package="cry")
filename <- file.path(datadir,"1dei_phases.mtz")
lMTZ <- readMTZ(filename)
# Change dataset name
print(lMTZ$header$DATASET)
lMTZ$header$DATASET[2,2] <- "New CRY dataset"
# Add one HISTORY line (string has to be 80-letters long)
addhist <- "From CRY 0.3.0 - run on Apr 2 20:12:00 2021"
n <- nchar(addhist)
nblanks <- 80-n
for (i in 1:nblanks) addhist <- paste0(addhist," ")
lMTZ$header$HISTORY <- c(lMTZ$header$HISTORY,addhist)
# Write to a new MTZ file
wd <- tempdir()
fname <- file.path(wd,"new.mtz")
writeMTZ(lMTZ$reflections,lMTZ$header,fname)
}
| /man/writeMTZ.Rd | no_license | jfoadi/cry | R | false | true | 2,327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reflections.R
\name{writeMTZ}
\alias{writeMTZ}
\title{Write data to an MTZ file}
\usage{
writeMTZ(reflections, header, filename, title = NULL, batch_header = NULL)
}
\arguments{
\item{reflections}{A data frame containing all reflection
records in columns. This is usually derived
from modifications of a previously existing
data frame obtained using
\code{\link{readMTZ}}.}
\item{header}{A list whose components are other R objects. This
is normally derived from the reading of another
MTZ file using \code{\link{readMTZ}}. See further
details at \code{\link{readMTZHeader}}.}
\item{filename}{A character string. The path to a valid mtz
file. If a file with the same name exists, it
will be deleted.}
\item{title}{A character string. The character string
associated with the TITLE keyword in an MTZ file.
This feature makes it easy to quickly identify the
data file in \href{https://www.ccp4.ac.uk}{CCP4}
programs. Default (NULL) is for the output file
to have the same title as the input file.}
\item{batch_header}{A named list including information at data
collection time. This component is present
only for raw (unmerged) intensity data
produce after the diffraction images
integration. Merged MTZ reflection files
have \code{batch_header=NULL}.
Names and types depend on
the type of experiment (more information
on this can be found at
\href{https://www.ccp4.ac.uk}{CCP4}.)}
}
\value{
This function does not return any R object. It outputs
an MTZ reflection file to some target location.
}
\description{
Write reflections and experimental information
to an MTZ file
}
\examples{
# Read the 1dei_phases data included in the package
datadir <- system.file("extdata",package="cry")
filename <- file.path(datadir,"1dei_phases.mtz")
lMTZ <- readMTZ(filename)
# Change dataset name
print(lMTZ$header$DATASET)
lMTZ$header$DATASET[2,2] <- "New CRY dataset"
# Add one HISTORY line (string has to be 80-letters long)
addhist <- "From CRY 0.3.0 - run on Apr 2 20:12:00 2021"
n <- nchar(addhist)
nblanks <- 80-n
for (i in 1:nblanks) addhist <- paste0(addhist," ")
lMTZ$header$HISTORY <- c(lMTZ$header$HISTORY,addhist)
# Write to a new MTZ file
wd <- tempdir()
fname <- file.path(wd,"new.mtz")
writeMTZ(lMTZ$reflections,lMTZ$header,fname)
}
|
/practice1.r | no_license | Buchfink/Python_hes_coursera | R | false | false | 1,971 | r | ||
#' Efficient by-group (weighted) summation
#'
#' @description \code{sum_by} performs an efficient and optionally weighted
#' by-group summation by using linear algebra and the Matrix package
#' capabilities. The by-group summation is performed through matrix cross-product
#' of the \code{y} parameter (coerced to a matrix if needed) with a (very) sparse
#' matrix built up using the \code{by} and the (optional) \code{w} parameters.
#'
#' Compared to base R, dplyr or data.table alternatives, this implementation
#' aims at being easier to use in a matrix-oriented context and can yield
#' efficiency gains when the number of columns becomes high.
#'
#' @param y A (sparse) vector, a (sparse) matrix or a data.frame.
#' The object to perform by-group summation on.
#' @param by The factor variable defining the by-groups. Character variables
#' are coerced to factors.
#' @param w The optional row weights to be used in the summation.
#' @param na_rm Should \code{NA} values in \code{y} be removed (ie treated as 0 in the summation) ?
#' Similar to \code{na.rm} argument in \code{\link[base]{sum}}, but \code{TRUE} by default.
#' If \code{FALSE}, \code{NA} values in \code{y} produce \code{NA} values in the result.
#' @param keep_sparse When \code{y} is a sparse vector or a sparse matrix, should the result
#' also be sparse ? \code{FALSE} by default. As \code{\link[Matrix]{sparseVector-class}} does
#' not have a name attribute, when \code{y} is a sparseVector the result does not have any
#' name (and a warning is cast).
#'
#' @return A vector, a matrix or a data.frame depending on the type of \code{y}. If \code{y} is
#' sparse and \code{keep_sparse = TRUE}, then the result is also sparse (without names
#' when it is a sparse vector, see keep_sparse argument for details).
#'
#' @author Martin Chevalier
#'
#' @examples # Data generation
#' set.seed(1)
#' n <- 100
#' p <- 10
#' H <- 3
#' y <- matrix(rnorm(n*p), ncol = p, dimnames = list(NULL, paste0("var", 1:10)))
#' y[1, 1] <- NA
#' by <- letters[sample.int(H, n, replace = TRUE)]
#' w <- rep(1, n)
#' w[by == "a"] <- 2
#'
#' # Standard use
#' sum_by(y, by)
#'
#' # Keeping the NAs
#' sum_by(y, by, na_rm = FALSE)
#'
#' # With a weight
#' sum_by(y, by, w = w)
#'
#' @export
#' @import Matrix
sum_by <- function(y, by, w = NULL, na_rm = TRUE, keep_sparse = FALSE){
  # -- Inspect the type of y so that it can be restored on output --
  class_y <- class(y)
  is_data.frame_y <- is.data.frame(y)
  if(is_data.frame_y) y <- as.matrix(y)
  is_sparse_y <- inherits(y, c("Matrix", "sparseVector"))
  is_vector_y <- is.null(dim(y))
  is_numeric_y <- is.numeric(if(!is_sparse_y) y else y@x)
  if(!is_numeric_y) stop("y is not numeric (or not entirely).")
  # Coerce dense or one-dimensional input to a sparseMatrix so that the
  # by-group summation can be carried out as a single matrix cross-product.
  # (scalar condition: || instead of elementwise |)
  if(!is_sparse_y || is_vector_y) y <- methods::as(y, "sparseMatrix")
  # -- Weights and NA values in y --
  if(is.null(w)) w <- rep(1, NROW(y))
  if(!is.numeric(w)) stop("w is not numeric")
  if(na_rm) y[is.na(y)] <- 0
  # -- NA in by: drop the corresponding rows and weights altogether --
  NA_in_by <- is.na(by)
  if(any(NA_in_by)){
    y <- y[!NA_in_by, , drop = FALSE]
    by <- by[!NA_in_by]
    w <- w[!NA_in_by]
  }
  # -- By-group summation via cross-product with a sparse block matrix --
  by <- as.factor(by)
  x <- make_block(w, by)
  colnames(x) <- levels(by)
  r <- crossprod(x, y)
  # -- Restore the type of the result --
  if(!is_sparse_y || !keep_sparse){
    r <- if(is_vector_y) stats::setNames(as.vector(r), rownames(r)) else as.matrix(r)
  }else{
    if(is_vector_y) warn("sparseVector can't have names, hence the output won't have names.")
    r <- methods::as(r, class_y)
  }
  if(is_data.frame_y) r <- as.data.frame(r)
  r
}
#' Expand a matrix or a data.frame with zeros based on rownames matching
#'
#' @description For a given two-dimensional object with rownames and a character
#' vector, \code{add_zero} produces a corresponding object whose rownames match
#' the character vector, with zeros on the additional rows.
#'
#' This function is an easy-to-use and reliable way to reintroduce
#' non-responding units in the variance estimation process (after the
#' non-response phase is taken into account).
#'
#' @param y A (sparse) matrix or a data.frame. The object to add zeros to.
#' @param rownames A character vector (other types are coerced to character).
#' The character vector giving the rows of the produced object.
#' @param remove Should rows of \code{y} whose name do not appear in the rownames
#' argument be removed ? TRUE by default, a warning is shown when rows are
#' removed.
#'
#' @return A (sparse) matrix or data.frame depending on the type of \code{y}.
#'
#' @author Martin Chevalier
#'
#' @examples # Data generation
#' set.seed(1)
#' n <- 10
#' p <- 2
#' y <- matrix(1:(n*p), ncol = p, dimnames = list(sample(letters, n)))
#' y[c(3, 8, 12)] <- NA
#' rownames <- letters
#'
#' # Standard use
#' add_zero(y, rownames)
#'
#' # Use when rownames in y do not match
#' # any element in the rownames argument
#' rownames(y)[1:3] <- toupper(rownames(y)[1:3])
#' add_zero(y, rownames)
#' add_zero(y, rownames, remove = FALSE)
#'
#' @import Matrix
#' @export
#'
add_zero <- function(y, rownames, remove = TRUE){
  # Check the type of y: a two-dimensional, entirely numeric (sparse) matrix
  # or data.frame with rownames. data.frames are temporarily coerced to
  # matrices and converted back before returning.
  class_y <- class(y)
  is_data.frame_y <- is.data.frame(y)
  if(is_data.frame_y) y <- as.matrix(y)
  if(is.null(dim(y)))
    stop("y must be a (sparse) matrix or a data.frame.")
  if(is.null(rownames(y)))
    stop("y must have rownames in order to be used in add_zero().")
  is_sparse_y <- inherits(y, c("Matrix", "sparseVector"))
  is_numeric_y <- is.numeric(if(!is_sparse_y) y else y@x)
  if(!is_numeric_y) stop("y is not numeric (or not entirely).")
  # Prepare the rownames argument: drop NAs and coerce to character.
  rownames <- rownames[!is.na(rownames)]
  rownames <- as.character(rownames)
  # Expand y with rows of zeros for every element of rownames that is missing
  # from rownames(y), keeping the sparse/dense nature of the input.
  compl <- setdiff(rownames, rownames(y))
  if(!is_sparse_y){
    r <- rbind(y, matrix(0, nrow = length(compl), ncol = NCOL(y), dimnames = list(compl)))
    if(is_data.frame_y) r <- as.data.frame(r)
  }else{
    r <- rbind(y, Matrix(0, nrow = length(compl), ncol = NCOL(y), dimnames = list(compl, NULL)))
    r <- methods::as(r, class_y)
  }
  # remove = TRUE: keep (and order by) exactly the rows listed in rownames,
  # warning when rows of y are dropped. remove = FALSE: keep every row,
  # sorted by name.
  if(remove){
    if(length(setdiff(rownames(y), rownames)))
      warn("The name of some rows in y do not match any element in the rownames argument. These rows are removed from the result (use remove = FALSE to change this behaviour).")
    o <- rownames
  }else o <- order(rownames(r))
  r[o, , drop = FALSE]
}
# TODO: Export and document make_block()
# Build a block expansion of y: the columns of y are replicated for each level
# of by, the rows of a given level being filled in the corresponding block of
# columns and zero elsewhere.
make_block <- function(y, by){
  # Step 1: Prepare the by argument
  by <- droplevels(as.factor(by))
  H <- length(levels(by))
  if(H == 1) return(y)
  # Step 2: Coerce y to a TsparseMatrix and drop the entries whose row has a
  # missing by value. Fix: the previous version filtered on is.na(res@j),
  # which is never TRUE right after coercion — the NAs would only have
  # appeared at step 3 (through as.numeric(by)), yielding an invalid sparse
  # matrix. The filter is now driven by is.na(by) per entry.
  res <- coerce_to_TsparseMatrix(y)
  if(any(is.na(by))){
    na <- is.na(by)[res@i + 1]
    res@x <- res@x[!na]
    res@i <- res@i[!na]
    res@j <- res@j[!na]
  }
  # Step 3: Adjust the j and Dim slots in order to obtain the block matrix
  p <- NCOL(res)
  res@Dimnames[2] <- list(NULL)
  res@j <- as.integer(((as.numeric(by) - 1) * p)[res@i + 1] + res@j)
  res@Dim <- c(res@Dim[1], as.integer(res@Dim[2] * H))
  # Step 4: Export the result with relevant attributes
  attr(res, "rowby") <- as.character(by)
  attr(res, "colby") <- as.character(rep(levels(by), each = p))
  res
}
# Unexported (and undocumented) functions
# From devtools (https://github.com/r-lib/devtools/blob/master/R/utils.r)
"%||%" <- function(a, b) {
  # Null-coalescing operator: fall back to b only when a is NULL.
  if (is.null(a)) b else a
}
# Coerce y to a triplet-form sparse matrix (TsparseMatrix) so that its
# @i/@j/@x slots can be manipulated directly (see make_block()).
coerce_to_TsparseMatrix <- function(y){
  if(is.null(dim(y))){
    # Vector input: build a one-column sparse matrix, keeping the names of y
    # as rownames of the result.
    names_y <- names(y)
    res <- Matrix::sparseMatrix(
      x = unname(y), i = seq_along(y), j = rep(1, length(y)), giveCsparse = FALSE
    )
    if(!is.null(names_y)) rownames(res) <- names_y
  }else if(!methods::is(y,"TsparseMatrix")){
    # Matrix-like input: convert, then restore the dimnames lost in coercion.
    dimnames_y <- dimnames(y)
    res <- methods::as(y, "TsparseMatrix")
    if(!is.null(dimnames_y)) dimnames(res) <- dimnames_y
  }else res <- y
  res
}
# Check whether y is already block-structured with respect to by: each column
# of y may have non-zero entries in at most one by-group. If so, return y with
# the "rowby"/"colby" attributes attached (as make_block() would set them);
# otherwise return NULL.
detect_block <- function(y, by){
  by <- droplevels(as.factor(by))
  y_bool <- coerce_to_TsparseMatrix(y) != 0
  by_bool <- make_block(rep(TRUE, NROW(y)), by)
  # prod[h, j]: number of non-zero entries of column j falling in group h.
  prod <- crossprod(by_bool, y_bool)
  prod_bool <- prod > 0
  # A column hit by more than one group breaks the block structure.
  if(!all(colSums(prod_bool) <= 1)) return(NULL)
  attr(y, "rowby") <- as.character(by)
  # For each column, the (single) group its non-zero entries belong to.
  attr(y, "colby") <- rep(levels(by), NCOL(prod_bool))[as.vector(prod_bool)]
  y
}
# Rebuild FUN from its deparsed source so that its enclosing environment
# becomes envir (deparse()/parse() drops the original closure environment).
# NOTE(review): deparse() round-trips may not preserve attributes or srcrefs
# of FUN — assumed acceptable for the internal uses in this file; to confirm.
change_enclosing <- function(FUN, envir = environment(FUN)){
  eval(parse(text = deparse(FUN)), envir = envir)
}
# Copy the objects named in `objects` from environment `from` into
# environment `to`. Non-functions are copied as is. Functions are re-enclosed
# in a fresh environment parented on `to`; when a function is a closure over
# some other environment (not listed in `not_closure`), that environment's
# content is recursively copied into the fresh environment first.
# `not_closure` lists environments that must never be treated as closures
# (by default the global environment and the current call stack).
assign_all <- function(objects, to, from = parent.frame(), not_closure = c(list(globalenv()), sys.frames())){
  for(n in objects){
    get_n <- get(n, from)
    if(!is.function(get_n)){
      assign(n, get_n, envir = to)
    }else{
      tmp <- new.env(parent = to)
      env_n <- environment(get_n)
      not_closure <- c(not_closure, from)
      is_closure <- !any(sapply(not_closure, identical, env_n))
      if(is_closure)
        assign_all(ls(env_n, all.names = TRUE), to = tmp, from = env_n, not_closure = not_closure)
      assign(n, change_enclosing(get_n, envir = tmp), envir = to)
    }
  }
}
is_error <- function(expr) {
  # TRUE when evaluating expr signals an error (evaluation is silenced);
  # expr is evaluated lazily, inside try().
  outcome <- try(expr, silent = TRUE)
  inherits(outcome, "try-error")
}
is_variable_name <- function(param, data = NULL, max_length = 1) {
  # A valid variable-name spec is a non-empty character vector of at most
  # max_length elements which, when data is provided, are all names of data.
  if (!is.character(param)) return(FALSE)
  if (length(param) == 0 || length(param) > max_length) return(FALSE)
  is.null(data) || all(param %in% names(data))
}
variable_not_in_data <- function(var, data) {
  # Return the elements of var that are not column names of data,
  # or NULL when there is none (or when var itself is NULL).
  if (is.null(var)) return(NULL)
  keep <- !(var %in% names(data))
  if (!any(keep)) return(NULL)
  var[keep]
}
# For each quoted argument in arg_list: if it evaluates (within data) to a
# character vector of variable names of data, replace it with the
# corresponding symbol(s); otherwise keep the quoted expression unchanged.
# With single = TRUE only one variable name per argument is allowed; with
# single = FALSE, length-1 arguments are recycled to the longest vector.
replace_variable_name_with_symbol <- function(arg_list, data, single = TRUE){
  # TODO: Allow consistent evaluation through parent frames
  # TODO: Handle the case of apparent name without match in data variable names
  tmp <- lapply(arg_list, function(a){
    if(is_error(a_eval <- eval(a, envir = data))){
      # Evaluation failed (e.g. a bare symbol absent from data): keep as is.
      a_out <- list(a)
    }else if(is_variable_name(a_eval, data = data, max_length = Inf)){
      if(single && !is_variable_name(a_eval, data = data, max_length = 1))
        stop("Only single variable names are allowed for the by argument.")
      # Turn the character name(s) into symbol(s).
      a_out <- lapply(a_eval, as.symbol)
    }else a_out <- list(a)
    a_out
  })
  if(!single){
    # Recycle length-1 arguments to the common length of the variable vectors.
    tmp_length <- sapply(tmp, length)
    if(!all(tmp_length %in% c(1, max(tmp_length))))
      stop("Some arguments have longer variable vectors than others.")
    tmp[tmp_length == 1] <-
      lapply(tmp[tmp_length == 1], `[`, rep(1, max(tmp_length)))
  }else if(length(tmp) == 1) tmp[1] <- tmp[[1]]
  tmp
}
# Cast an immediate warning, without showing the call, ending with a newline.
warn <- function(...) warning(..., "\n", call. = FALSE, immediate. = TRUE)
# Cast an informational message prefixed with "Note: ".
note <- function(...) message("Note: ", ..., "\n")
# Does x inherit from class "gustave_statistic_wrapper"?
is_statistic_wrapper <- function(x) inherits(x, "gustave_statistic_wrapper")
names_else_NA <- function(x){
  # Names of x, with NA substituted for empty names; when x has no names
  # attribute at all, a vector of NA of the same length as x.
  nm <- names(x)
  if (is.null(nm)) return(rep(NA, length(x)))
  nm[nm %in% ""] <- NA
  nm
}
# Expand a qualitative variable into a (sparse) disjunctive matrix: one column
# per level, with a 1 (or TRUE when logical = TRUE) marking the level of each
# observation. Observations with NA in var yield NA on the whole row.
discretize_qualitative_var <- function(var, logical = FALSE){
  var <- droplevels(as.factor(var))
  result <- Matrix(nrow = length(var), ncol = length(levels(var)))
  # Fill non-missing rows with the one-hot encoding (no intercept).
  result[!is.na(var), ] <- Matrix::sparse.model.matrix(~ var - 1)
  result[is.na(var), ] <- NA
  # Multiplying by 1 turns the logical pattern into a numeric matrix.
  if(!logical) result <- result * 1
  rownames(result) <- names(var)
  colnames(result) <- levels(var)
  result
}
# Walk up the calling stack until a frame containing an object named
# "execution_envir" is found, and return that object.
# NOTE(review): the argument x is unused, and the loop condition
# (is.null(found) || identical(baseenv(), parent.frame(n))) keeps iterating
# while the frame is baseenv(); if "execution_envir" is never found this may
# not terminate — to confirm against the callers of this helper.
get_through_parent_frame <- function(x){
  n <- 0
  found <- NULL
  while(is.null(found) || identical(baseenv(), parent.frame(n))){
    n <- n + 1
    # get0() returns NULL (instead of erroring) when the name is absent.
    found <- get0("execution_envir", parent.frame(n))
  }
  found
}
display_only_n_first <- function(x,
                                 n = 10,
                                 collapse = ", ",
                                 final_text = paste0(" and ", length(x) - n, " more")
){
  # Collapse x into a single string, truncating to its n first elements
  # (followed by final_text, lazily evaluated) when x is longer than n.
  if (length(x) > n) {
    shown <- paste(x[seq_len(n)], collapse = collapse)
    return(paste0(shown, final_text))
  }
  paste(x, collapse = collapse)
}
# Row-bind a list of data.frames that may have different columns: missing
# columns are filled with NA in each piece, and columns that end up entirely
# NA are dropped from the result. Row names are reset.
rbind_output_df <- function(list_output_df){
  # Union of all column names, in order of first appearance.
  names <- unique(do.call(base::c, lapply(list_output_df, names)))
  output_df <- do.call(rbind, lapply(list_output_df, function(i){
    i[, setdiff(names, names(i))] <- NA
    i[, names]
  }))
  # Drop columns that are NA on every row.
  output_df <- output_df[, sapply(output_df, function(i) !all(is.na(i)))]
  rownames(output_df) <- NULL
  output_df
}
| /R/utils.R | no_license | avouacr/gustave | R | false | false | 12,162 | r |
#' Efficient by-group (weighted) summation
#'
#' @description \code{sum_by} performs an efficient and optionally weighted
#' by-group summation by using linear algebra and the Matrix package
#' capabilities. The by-group summation is performed through matrix cross-product
#' of the \code{y} parameter (coerced to a matrix if needed) with a (very) sparse
#' matrix built up using the \code{by} and the (optional) \code{w} parameters.
#'
#' Compared to base R, dplyr or data.table alternatives, this implementation
#' aims at being easier to use in a matrix-oriented context and can yield
#' efficiency gains when the number of columns becomes high.
#'
#' @param y A (sparse) vector, a (sparse) matrix or a data.frame.
#' The object to perform by-group summation on.
#' @param by The factor variable defining the by-groups. Character variables
#' are coerced to factors.
#' @param w The optional row weights to be used in the summation.
#' @param na_rm Should \code{NA} values in \code{y} be removed (ie treated as 0 in the summation) ?
#' Similar to \code{na.rm} argument in \code{\link[base]{sum}}, but \code{TRUE} by default.
#' If \code{FALSE}, \code{NA} values in \code{y} produce \code{NA} values in the result.
#' @param keep_sparse When \code{y} is a sparse vector or a sparse matrix, should the result
#' also be sparse ? \code{FALSE} by default. As \code{\link[Matrix]{sparseVector-class}} does
#' not have a name attribute, when \code{y} is a sparseVector the result does not have any
#' name (and a warning is cast).
#'
#' @return A vector, a matrix or a data.frame depending on the type of \code{y}. If \code{y} is
#' sparse and \code{keep_sparse = TRUE}, then the result is also sparse (without names
#' when it is a sparse vector, see keep_sparse argument for details).
#'
#' @author Martin Chevalier
#'
#' @examples # Data generation
#' set.seed(1)
#' n <- 100
#' p <- 10
#' H <- 3
#' y <- matrix(rnorm(n*p), ncol = p, dimnames = list(NULL, paste0("var", 1:10)))
#' y[1, 1] <- NA
#' by <- letters[sample.int(H, n, replace = TRUE)]
#' w <- rep(1, n)
#' w[by == "a"] <- 2
#'
#' # Standard use
#' sum_by(y, by)
#'
#' # Keeping the NAs
#' sum_by(y, by, na_rm = FALSE)
#'
#' # With a weight
#' sum_by(y, by, w = w)
#'
#' @export
#' @import Matrix
sum_by <- function(y, by, w = NULL, na_rm = TRUE, keep_sparse = FALSE){
  # -- Inspect the type of y so that it can be restored on output --
  class_y <- class(y)
  is_data.frame_y <- is.data.frame(y)
  if(is_data.frame_y) y <- as.matrix(y)
  is_sparse_y <- inherits(y, c("Matrix", "sparseVector"))
  is_vector_y <- is.null(dim(y))
  is_numeric_y <- is.numeric(if(!is_sparse_y) y else y@x)
  if(!is_numeric_y) stop("y is not numeric (or not entirely).")
  # Coerce dense or one-dimensional input to a sparseMatrix so that the
  # by-group summation can be carried out as a single matrix cross-product.
  # (scalar condition: || instead of elementwise |)
  if(!is_sparse_y || is_vector_y) y <- methods::as(y, "sparseMatrix")
  # -- Weights and NA values in y --
  if(is.null(w)) w <- rep(1, NROW(y))
  if(!is.numeric(w)) stop("w is not numeric")
  if(na_rm) y[is.na(y)] <- 0
  # -- NA in by: drop the corresponding rows and weights altogether --
  NA_in_by <- is.na(by)
  if(any(NA_in_by)){
    y <- y[!NA_in_by, , drop = FALSE]
    by <- by[!NA_in_by]
    w <- w[!NA_in_by]
  }
  # -- By-group summation via cross-product with a sparse block matrix --
  by <- as.factor(by)
  x <- make_block(w, by)
  colnames(x) <- levels(by)
  r <- crossprod(x, y)
  # -- Restore the type of the result --
  if(!is_sparse_y || !keep_sparse){
    r <- if(is_vector_y) stats::setNames(as.vector(r), rownames(r)) else as.matrix(r)
  }else{
    if(is_vector_y) warn("sparseVector can't have names, hence the output won't have names.")
    r <- methods::as(r, class_y)
  }
  if(is_data.frame_y) r <- as.data.frame(r)
  r
}
#' Expand a matrix or a data.frame with zeros based on rownames matching
#'
#' @description For a given two-dimensional object with rownames and a character
#' vector, \code{add_zero} produces a corresponding object whose rownames match
#' the character vector, with zeros on the additional rows.
#'
#' This function is an easy-to-use and reliable way to reintroduce
#' non-responding units in the variance estimation process (after the
#' non-response phase is taken into account).
#'
#' @param y A (sparse) matrix or a data.frame. The object to add zeros to.
#' @param rownames A character vector (other types are coerced to character).
#' The character vector giving the rows of the produced object.
#' @param remove Should rows of \code{y} whose name do not appear in the rownames
#' argument be removed ? TRUE by default, a warning is shown when rows are
#' removed.
#'
#' @return A (sparse) matrix or data.frame depending on the type of \code{y}.
#'
#' @author Martin Chevalier
#'
#' @examples # Data generation
#' set.seed(1)
#' n <- 10
#' p <- 2
#' y <- matrix(1:(n*p), ncol = p, dimnames = list(sample(letters, n)))
#' y[c(3, 8, 12)] <- NA
#' rownames <- letters
#'
#' # Standard use
#' add_zero(y, rownames)
#'
#' # Use when rownames in y do not match
#' # any element in the rownames argument
#' rownames(y)[1:3] <- toupper(rownames(y)[1:3])
#' add_zero(y, rownames)
#' add_zero(y, rownames, remove = FALSE)
#'
#' @import Matrix
#' @export
#'
add_zero <- function(y, rownames, remove = TRUE){
# y <- m; rownames <- letters
# Type of y
class_y <- class(y)
is_data.frame_y <- is.data.frame(y)
if(is_data.frame_y) y <- as.matrix(y)
if(is.null(dim(y)))
stop("y must be a (sparse) matrix or a data.frame.")
if(is.null(rownames(y)))
stop("y must have rownames in order to be used in add_zero().")
is_sparse_y <- inherits(y, c("Matrix", "sparseVector"))
is_numeric_y <- is.numeric(if(!is_sparse_y) y else y@x)
if(!is_numeric_y) stop("y is not numeric (or not entirely).")
# Prepare rownames argument
rownames <- rownames[!is.na(rownames)]
rownames <- as.character(rownames)
# Expand y with 0 in order to get an object whose rownames
# are the character argument rownames (in the same order)
compl <- setdiff(rownames, rownames(y))
if(!is_sparse_y){
r <- rbind(y, matrix(0, nrow = length(compl), ncol = NCOL(y), dimnames = list(compl)))
if(is_data.frame_y) r <- as.data.frame(r)
}else{
r <- rbind(y, Matrix(0, nrow = length(compl), ncol = NCOL(y), dimnames = list(compl, NULL)))
r <- methods::as(r, class_y)
}
# Remove rows that do not match any element in rownames
# if remove is TRUE
if(remove){
if(length(setdiff(rownames(y), rownames)))
warn("The name of some rows in y do not match any element in the rownames argument. These rows are removed from the result (use remove = FALSE to change this behaviour).")
o <- rownames
}else o <- order(rownames(r))
r[o, , drop = FALSE]
}
# TODO: Export and document make_block()
# Build a block expansion of y: the columns of y are replicated for each level
# of by, the rows of a given level being filled in the corresponding block of
# columns and zero elsewhere.
make_block <- function(y, by){
  # Step 1: Prepare the by argument
  by <- droplevels(as.factor(by))
  H <- length(levels(by))
  if(H == 1) return(y)
  # Step 2: Coerce y to a TsparseMatrix and drop the entries whose row has a
  # missing by value. Fix: the previous version filtered on is.na(res@j),
  # which is never TRUE right after coercion — the NAs would only have
  # appeared at step 3 (through as.numeric(by)), yielding an invalid sparse
  # matrix. The filter is now driven by is.na(by) per entry.
  res <- coerce_to_TsparseMatrix(y)
  if(any(is.na(by))){
    na <- is.na(by)[res@i + 1]
    res@x <- res@x[!na]
    res@i <- res@i[!na]
    res@j <- res@j[!na]
  }
  # Step 3: Adjust the j and Dim slots in order to obtain the block matrix
  p <- NCOL(res)
  res@Dimnames[2] <- list(NULL)
  res@j <- as.integer(((as.numeric(by) - 1) * p)[res@i + 1] + res@j)
  res@Dim <- c(res@Dim[1], as.integer(res@Dim[2] * H))
  # Step 4: Export the result with relevant attributes
  attr(res, "rowby") <- as.character(by)
  attr(res, "colby") <- as.character(rep(levels(by), each = p))
  res
}
# Unexported (and undocumented) functions
# From devtools (https://github.com/r-lib/devtools/blob/master/R/utils.r)
"%||%" <- function(a, b) if (!is.null(a)) a else b
coerce_to_TsparseMatrix <- function(y){
if(is.null(dim(y))){
names_y <- names(y)
res <- Matrix::sparseMatrix(
x = unname(y), i = seq_along(y), j = rep(1, length(y)), giveCsparse = FALSE
)
if(!is.null(names_y)) rownames(res) <- names_y
}else if(!methods::is(y,"TsparseMatrix")){
dimnames_y <- dimnames(y)
res <- methods::as(y, "TsparseMatrix")
if(!is.null(dimnames_y)) dimnames(res) <- dimnames_y
}else res <- y
res
}
detect_block <- function(y, by){
by <- droplevels(as.factor(by))
y_bool <- coerce_to_TsparseMatrix(y) != 0
by_bool <- make_block(rep(TRUE, NROW(y)), by)
prod <- crossprod(by_bool, y_bool)
prod_bool <- prod > 0
if(!all(colSums(prod_bool) <= 1)) return(NULL)
attr(y, "rowby") <- as.character(by)
attr(y, "colby") <- rep(levels(by), NCOL(prod_bool))[as.vector(prod_bool)]
y
}
change_enclosing <- function(FUN, envir = environment(FUN)){
eval(parse(text = deparse(FUN)), envir = envir)
}
assign_all <- function(objects, to, from = parent.frame(), not_closure = c(list(globalenv()), sys.frames())){
for(n in objects){
get_n <- get(n, from)
if(!is.function(get_n)){
assign(n, get_n, envir = to)
}else{
tmp <- new.env(parent = to)
env_n <- environment(get_n)
not_closure <- c(not_closure, from)
is_closure <- !any(sapply(not_closure, identical, env_n))
if(is_closure)
assign_all(ls(env_n, all.names = TRUE), to = tmp, from = env_n, not_closure = not_closure)
assign(n, change_enclosing(get_n, envir = tmp), envir = to)
}
}
}
is_error <- function(expr)
inherits(try(expr, silent = TRUE), "try-error")
is_variable_name <- function(param, data = NULL, max_length = 1)
is.character(param) &&
(is.null(data) || length(setdiff(param, names(data))) == 0) &&
length(param) > 0 && length(param) <= max_length
variable_not_in_data <- function(var, data){
if(is.null(var)) return(NULL)
tmp <- var[!(var %in% names(data))]
if(length(tmp) == 0) return(NULL)
tmp
}
replace_variable_name_with_symbol <- function(arg_list, data, single = TRUE){
# TODO: Allow consistent evaluation through parent frames
# TODO: Handle the case of apparent name without match in data variable names
tmp <- lapply(arg_list, function(a){
if(is_error(a_eval <- eval(a, envir = data))){
a_out <- list(a)
}else if(is_variable_name(a_eval, data = data, max_length = Inf)){
if(single && !is_variable_name(a_eval, data = data, max_length = 1))
stop("Only single variable names are allowed for the by argument.")
a_out <- lapply(a_eval, as.symbol)
}else a_out <- list(a)
a_out
})
if(!single){
tmp_length <- sapply(tmp, length)
if(!all(tmp_length %in% c(1, max(tmp_length))))
stop("Some arguments have longer variable vectors than others.")
tmp[tmp_length == 1] <-
lapply(tmp[tmp_length == 1], `[`, rep(1, max(tmp_length)))
}else if(length(tmp) == 1) tmp[1] <- tmp[[1]]
tmp
}
warn <- function(...) warning(..., "\n", call. = FALSE, immediate. = TRUE)
note <- function(...) message("Note: ", ..., "\n")
is_statistic_wrapper <- function(x) inherits(x, "gustave_statistic_wrapper")
names_else_NA <- function(x){
if(is.null(names(x))) rep(NA, length(x)) else{
tmp <- names(x)
tmp[tmp %in% ""] <- NA
tmp
}
}
# Expand a qualitative (factor-like) variable into a one-hot indicator matrix.
#
# var:     vector coercible to factor; unused levels are dropped first.
# logical: if TRUE the result keeps logical entries; otherwise it is
#          multiplied by 1 to yield a 0/1 numeric matrix.
#
# Returns a Matrix (package Matrix) with one column per remaining level,
# rows named after names(var) and columns after the levels; rows where
# `var` is NA are set entirely to NA.
discretize_qualitative_var <- function(var, logical = FALSE){
var <- droplevels(as.factor(var))
# Start from an all-NA matrix; the non-missing rows are filled in below.
# NOTE(review): relies on Matrix(nrow =, ncol =) defaulting its data to NA
# - confirm against the Matrix package documentation.
result <- Matrix(nrow = length(var), ncol = length(levels(var)))
# ~ var - 1 builds the indicator columns without an intercept; rows with
# NA are dropped by model-matrix NA handling, matching the subset here.
result[!is.na(var), ] <- Matrix::sparse.model.matrix(~ var - 1)
result[is.na(var), ] <- NA
if(!logical) result <- result * 1
rownames(result) <- names(var)
colnames(result) <- levels(var)
result
}
# Walk up the calling stack looking for an object named "execution_envir"
# and return it.
#
# NOTE(review): the argument `x` is never used in the body - presumably the
# lookup was meant to be get0(x, ...); confirm against callers.
# NOTE(review): if no frame defines "execution_envir" this loop may never
# terminate, since the stop condition tests identity with baseenv() while
# parent.frame() tops out at the global environment - verify.
get_through_parent_frame <- function(x){
n <- 0
found <- NULL
# Climb one frame at a time until the object is found.
while(is.null(found) || identical(baseenv(), parent.frame(n))){
n <- n + 1
found <- get0("execution_envir", parent.frame(n))
}
found
}
# Collapse `x` into a single display string showing at most `n` elements.
# Longer vectors are truncated and suffixed with `final_text`, which by
# default reports how many elements were hidden.
display_only_n_first <- function(x,
n = 10,
collapse = ", ",
final_text = paste0(" and ", length(x) - n, " more")
){
  if (length(x) > n) {
    shown <- paste(x[seq_len(n)], collapse = collapse)
    paste0(shown, final_text)
  } else {
    paste(x, collapse = collapse)
  }
}
# Row-bind a list of data frames that may have different columns.
#
# Columns missing from an individual data frame are filled with NA, columns
# are put into a common order, columns that end up entirely NA are dropped,
# and row names are reset.
#
# list_output_df: a list of data.frames.
# Returns a single data.frame with the union of all columns (minus the
# all-NA ones).
rbind_output_df <- function(list_output_df){
  all_names <- unique(do.call(base::c, lapply(list_output_df, names)))
  output_df <- do.call(rbind, lapply(list_output_df, function(i){
    i[, setdiff(all_names, names(i))] <- NA
    # drop = FALSE keeps a one-column selection as a data.frame
    i[, all_names, drop = FALSE]
  }))
  keep <- sapply(output_df, function(i) !all(is.na(i)))
  # drop = FALSE: without it a single surviving column collapses to a vector
  output_df <- output_df[, keep, drop = FALSE]
  rownames(output_df) <- NULL
  output_df
}
|
#' A myGO function
#'
#' This allows you to do Deinococcus radiodurans ontology analysis. The funciton generates a data frame with gene symbol, refID, gene ID, Differential expression pattern, annotation from NCBI, GO terms from Panther.
#'
#' The data base is based on all the genes that have hits in PATHNER from our previous analysis
#'
#' You need MF, BP, CC, PC for database construction for this function to run
#'
#' You need to input the annotation dataset from annotation.rmd
#'
#' write = T to write a csv file with all the genes in the ontology database
#'
#' @examples
#' \dontrun{myGO(MF, BP, CC, PC, edgeR_34hr, FALSE)}
#'
#' @export
# Build a Deinococcus radiodurans gene-ontology data frame.
#
# MF, BP, CC, PC: PANTHER exports (molecular function, biological process,
#   cellular component, protein class); the first column of each is dropped.
# annoset: path to the annotation CSV produced by annotation.rmd.
# write:   if TRUE, write the combined and de-duplicated GO tables to CSV
#   files in the working directory; otherwise a warning is emitted.
#
# Returns a data frame with one row per unique combination of gene symbol,
# refID, geneID, DE pattern, NCBI annotation, GO type and GO term.
myGO <- function(MF, BP, CC, PC, annoset, write){
# drop the first (index) column of each PANTHER table
mf <- MF[,-1]
bp <- BP[,-1]
cc <- CC[,-1]
pc <- PC[,-1]
# colname consistence: align mf/bp column names on the pc table
# NOTE(review): cc is not renamed here - presumably its columns already
# match pc; confirm against the input files.
colN <- colnames(pc)
colnames(mf) <- colN
colnames(bp) <- colN
#remove duplicate rows
d_mf <- duplicated(mf)
d_bp <- duplicated(bp)
d_cc <- duplicated(cc)
d_pc <- duplicated(pc)
mf2 <- mf[!d_mf,]
bp2 <- bp[!d_bp,]
cc2 <- cc[!d_cc,]
pc2 <- pc[!d_pc,]
# global ontology dataset: all four GO categories stacked
combined2 <- rbind(mf2, bp2, cc2, pc2)
duplicat <- duplicated(combined2)
# cleaned ontology dataset
GO.dra.db <- combined2[!duplicat,]
if(write == TRUE){
write.csv(combined2, "[dra]withduplicate.goterm.csv")
write.csv(GO.dra.db, "[GO]dra.database.csv")
} else {
warning("Not to produce the ontology library in the same filepath.")
}
# reGO: join the GO table with the annotation file on the RefSeq locus id.
reGO<- function(annoset, goterm){
anno <- read.csv(annoset, stringsAsFactors = FALSE, strip.white = T)[,-1]
anno <- as.data.frame(apply(anno, 2, trimws, "both"), stringsAsFactors = FALSE)
anno.refID <- anno$refseq_locus
go <- as.data.frame(apply(goterm, 2, as.character), stringsAsFactors = FALSE)
go <- as.data.frame(apply(go, 2, trimws, "both"), stringsAsFactors = FALSE)
# genes without a PANTHER symbol are labelled explicitly
go[is.na(go$symbol),]$symbol <- "No Match"
go.refID <- go$refID
# from anno set: accumulators filled row by row below
geneid <- vector()
dexpr <- vector()
notation <- vector()
# match first: keep only GO rows whose refID appears in the annotation
k <- go.refID %in% anno.refID
set <- go[which(k == TRUE), ]
for (i in 1:nrow(set)){
#from annotation dataset: pull geneID, DE pattern and annotation text
geneid[i] <- anno[anno$refseq_locus == set$refID[i],]$geneID
dexpr[i] <- anno[anno$refseq_locus == set$refID[i],]$de
notation[i] <- anno[anno$refseq_locus == set$refID[i],]$annotation
}
df <- as.data.frame(cbind( symbol = set$symbol,
refID = set$refID,
geneID = geneid,
DE = dexpr,
annotation = notation,
type = set$type,
goTerm = set$Term))
#clean the duplicates
dup <- duplicated(df)
df <- df[!dup,]
df
}
goterm <- GO.dra.db
mygo <- reGO(annoset, goterm)
mygo
}
| /DraOnto.db/R/myGO.R | no_license | xpingli/DraOnto.db | R | false | false | 3,820 | r | #' A myGO function
#'
#' This allows you to do Deinococcus radiodurans ontology analysis. The funciton generates a data frame with gene symbol, refID, gene ID, Differential expression pattern, annotation from NCBI, GO terms from Panther.
#'
#' The data base is based on all the genes that have hits in PATHNER from our previous analysis
#'
#' You need MF, BP, CC, PC for database construction for this function to run
#'
#' You need to input the annotation dataset from annotation.rmd
#'
#' write = T to write a csv file with all the genes in the ontology database
#'
#' @examples
#' \dontrun{myGO(MF, BP, CC, PC, edgeR_34hr, FALSE)}
#'
#' @export
myGO <- function(MF, BP, CC, PC, annoset, write){
mf <- MF[,-1]
bp <- BP[,-1]
cc <- CC[,-1]
pc <- PC[,-1]
#colname consistence
colN <- colnames(pc)
colnames(mf) <- colN
colnames(bp) <- colN
#remove duplicate rows
d_mf <- duplicated(mf)
d_bp <- duplicated(bp)
d_cc <- duplicated(cc)
d_pc <- duplicated(pc)
mf2 <- mf[!d_mf,]
bp2 <- bp[!d_bp,]
cc2 <- cc[!d_cc,]
pc2 <- pc[!d_pc,]
# global ontology dataset
combined2 <- rbind(mf2, bp2, cc2, pc2)
duplicat <- duplicated(combined2)
# cleaned ontology dataset
GO.dra.db <- combined2[!duplicat,]
if(write == TRUE){
write.csv(combined2, "[dra]withduplicate.goterm.csv")
write.csv(GO.dra.db, "[GO]dra.database.csv")
} else {
warning("Not to produce the ontology library in the same filepath.")
}
reGO<- function(annoset, goterm){
anno <- read.csv(annoset, stringsAsFactors = FALSE, strip.white = T)[,-1]
anno <- as.data.frame(apply(anno, 2, trimws, "both"), stringsAsFactors = FALSE)
anno.refID <- anno$refseq_locus
go <- as.data.frame(apply(goterm, 2, as.character), stringsAsFactors = FALSE)
go <- as.data.frame(apply(go, 2, trimws, "both"), stringsAsFactors = FALSE)
go[is.na(go$symbol),]$symbol <- "No Match"
go.refID <- go$refID
# from anno set
geneid <- vector()
dexpr <- vector()
notation <- vector()
# match first
k <- go.refID %in% anno.refID
set <- go[which(k == TRUE), ]
for (i in 1:nrow(set)){
#from annotation dataset
geneid[i] <- anno[anno$refseq_locus == set$refID[i],]$geneID
dexpr[i] <- anno[anno$refseq_locus == set$refID[i],]$de
notation[i] <- anno[anno$refseq_locus == set$refID[i],]$annotation
}
df <- as.data.frame(cbind( symbol = set$symbol,
refID = set$refID,
geneID = geneid,
DE = dexpr,
annotation = notation,
type = set$type,
goTerm = set$Term))
#clean the duplicates
dup <- duplicated(df)
df <- df[!dup,]
df
}
goterm <- GO.dra.db
mygo <- reGO(annoset, goterm)
mygo
}
|
library(purrr)
library(tmap)
# library(stars)
library(ggplot2)
theme_set(theme_bw())
# library(tcruziutils)
devtools::load_all("../../tcruziutils")
library(dplyr)
library(parallel)
# spatial
# country polygons
countries <- readRDS("../../preprocessing/countries.Rds")
# raster brick with covariate values within endimic zone
env_grids <- raster::brick("../../preprocessing/env_grids.grd")
# island polygons to be excluded from analysis
exclude <- rgdal::readOGR("../../polygon boundaries/islands_americas.shp")
# data held out for evaluation (interpolation performance)
# used here for final model fit (but does not affect evaluation that was based
# on models fit without this data)
ev_df <- readRDS("../../preprocessing/presence_vector_evaluation.Rds")
# preprocessed data with presence/background data and spatial folds
# each element containt data for one species
folds <- readRDS("../../preprocessing/folds_list_vector_presence.Rds")
# model results ( each element contains results for one species )
# this was used for evaluation
# see cross-val-gam-tgb.Rds
cv_res <- readRDS("cv-results.Rds")
# model specifications used for analysis
settings_df <- purrr::cross_df(list(
type = c("linear", "smooth", "geospatial"),
GP = c(FALSE, TRUE)))
# remove species that errored (and category "other")
ind_keep <- map_lgl(cv_res, ~class(.x)[1] != "try-error") &
# map_lgl(folds, ~ (sum(.x$train$presence) + sum(.x$test$presence)) >= 100) &
map_lgl(folds, ~ !grepl("other", .x$species))
cv_res <- cv_res[ind_keep]
folds <- folds[ind_keep]
## smry table of best settings
settings_best <- map_dfr(
names(cv_res),
~{
print(.x)
get_spec_smry_df(cv_res, folds, settings_df, .x)
})
settings_best <- settings_best %>%
dplyr::mutate(
block_gp_ratio = block_width / gp_range,
range_gp_ratio = range / gp_range,
block_range_ratio = block_width / range)
settings_best
species_keep <- settings_best %>% pull(species)
cv_res <- cv_res[species_keep]
folds <- folds[species_keep]
## the best model is refit using all data, including data used previously for
# model validation (fold with blocks numbered 5 and random evaluation data)
pred_list <- mclapply(
seq_along(cv_res),
function(ind) {
.x <- cv_res[[ind]]
.y <- folds[[ind]]
print(.y$species)
hull <- .y$hull
# mask <- .y$hull %>% raster::crop(exclude)
ndf <- env_grids %>% grid_to_df(.y$hull, .y$hull)
ndf$species <- .y$species
bmod <- get_best_mod(.x)
form <- formula(bmod)
## refit with data from all blocks (1-5) + random hold-out for final prediction
# add block 5 to data
ndata <- rbind(.y$train, .y$test)
# add hold-out data
ndata_ev <- as_spatial(ev_df)[.y$hull, ]
ndata_ev$fold <- NA
ndata <- rbind(ndata_ev, ndata)
ndata$presence <- 0
ndata$presence <- 1L * (ndata$species == .y$species)
## readjust weights
ndata$wght <- 1
ndata$wght[ndata$presence == 0] <- sum(ndata$presence)/sum(!ndata$presence)
opt_gamma <- settings_best %>%
filter(species == .y$species) %>% pull(gamma)
bmod <- update(bmod, formula = form, data = as.data.frame(ndata),
gamma = opt_gamma)
pred <- predict(
bmod,
ndf,
type = "link",
discrete = FALSE,
se = TRUE)
ndf$prediction <- as.numeric(exp(pred$fit) / (1 + exp(pred$fit)))
ndf$ci_lower <- as.numeric(exp(pred$fit - 2 * pred$se) /
(1 + exp(pred$fit - 2 * pred$se)))
ndf$ci_upper <- as.numeric(exp(pred$fit + 2 * pred$se) /
(1 + exp(pred$fit + 2 * pred$se)))
nas <- rowSums(is.na(ndf[, c("prediction", "ci_lower", "ci_upper")])) > 0
if( sum(nas) > 0) {
ndf[nas,] <- NA
}
ndf <- ndf %>% filter(!is.na(prediction))
grid_prediction <- df_to_grid(ndf, env_grids[[1]], column = "prediction") %>%
raster::mask(exclude, inverse = TRUE)
grid_cilower <- df_to_grid(ndf, env_grids[[1]], column = "ci_lower") %>%
raster::mask(exclude, inverse = TRUE)
grid_ciupper <- df_to_grid(ndf, env_grids[[1]], column = "ci_upper") %>%
raster::mask(exclude, inverse = TRUE)
names(grid_prediction) <- names(grid_cilower) <- names(grid_ciupper) <-
.y$species
gc()
list(
species = .y$species,
final_mod = bmod,
prediction = grid_prediction,
ci_lower = grid_cilower,
ci_upper = grid_ciupper)
}, mc.cores = 5)
# store the final model within cv_res object
for(i in seq_along(pred_list)) {
cv_res[[pred_list[[i]]$species]]$final_mod <- pred_list[[i]]$final_mod
}
saveRDS(cv_res, "cv-results-final.Rds")
# combine raster for different species into raster brick
pred_brick <- pred_list %>% map(~.x$prediction) %>% reduce(raster::addLayer)
cil_brick <- pred_list %>% map(~.x$ci_lower) %>% reduce(raster::addLayer)
ciu_brick <- pred_list %>% map(~.x$ci_upper) %>% reduce(raster::addLayer)
# these are the final maps available online
raster::brick(
pred_brick,
filename = "predicted-maps-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
raster::brick(
cil_brick,
filename = "predicted-cil-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
raster::brick(
ciu_brick,
filename = "predicted-ciu-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
# save predicted maps (as pdf and png)
for (ind in seq_along(cv_res)) {
.x <- cv_res[[ind]]
.y <- folds[[ind]]
print(.y$species)
hull <- .y$hull
spec <- sub(" ", ".", .y$species)
# mask <- .y$hull %>% raster::crop(exclude)
shp <- countries %>% raster::crop(raster::extent(hull))
pred_grid <- raster::brick("predicted-maps-tgb-revision.grd")[[spec]] %>%
raster::crop(raster::extent(hull))
pred_map <- tm_shape(shp) +
tm_borders(alpha = .8) +
tm_shape(pred_grid) +
tm_raster(
alpha = .7, style = "cont", palette = viridis::magma(1e3),
breaks = seq(0, 1, by = .2), title = "prediction") +
tm_layout(
title = .y$species,
title.size = 2.5,
title.fontface = "italic",
legend.position = c("left", "bottom"),
legend.text.size = 1.2,
legend.hist.size = 1.2,
legend.title.size = 1.5)
path <- paste0("figures/final-predictions/pred-map-final-",
sub(" ", "-", .y$species), ".pdf")
tmap_save(pred_map, filename = path, width = 7, height = 7)
tmap_save(pred_map,
filename = paste0("figures/final-predictions/pred-map-final-",
sub(" ", "-", .y$species), ".png"), width = 7, height = 7)
}
################################################################################
############################# Bivariate Maps ###################################
################################################################################
# color palette
pal <- matrix(pals::arc.bluepink(), nrow = 4, ncol = 4)
# use some sensible cut offs for prediction
# .5 is baseline because presence/background are reweighted to have equal weights
# thus, probs above indicate presence liklier than absence.
cut_fit = c(0, .25, .5, .75, 1)
# SE of >=.3 implies that lower and upper CI are in different categories of the
# prediction as defined above, then go down in steps of 1/2
cut_se = c(0, .075, .15, .3, 1)
# loop over species and create bivariate maps
for(i in seq_along(cv_res)) {
cv <- cv_res[[i]]
species_i <- cv$species
print(species_i)
fold <- folds[[species_i]]
tm_bv <- tm_bivar_raster(
path_pred = "predicted-maps-tgb-revision.grd",
path_ciu = "predicted-ciu-tgb-revision.grd",
path_cil = "predicted-cil-tgb-revision.grd",
fold,
palette = as.vector(t(pal)),
add_points_after = FALSE,
cut_fit = cut_fit,
cut_se = cut_se
)
tm_bv_drawn <- tm_bivar_draw(tm_bv, x = .75, y = .1, width = .2, height = .2
)
path <- paste0("figures/bivariate_maps/map_bivar_", sub(" ", "_", species_i))
png(paste0(path, ".png"), width = 600, height = 600)
print(tm_bv_drawn)
dev.off()
pdf(paste0(path, ".pdf"), width = 7, height = 7)
print(tm_bv_drawn)
dev.off()
}
### bivar maps for figure 2 with adjusted legend positions
# color palette
pal <- matrix(pals::arc.bluepink(), nrow = 4, ncol = 4)
# use some sensible cut offs for prediction
# .5 is baseline because presence/TGB absence are reweighted to have equal weights
# in sum. probs above indicate presence liklier than absence.
cut_fit = c(0, .25, .5, .75, 1)
# ci width of .3 indicates that upper and lower CI will not be in same category
# of predicted probabilities as defined above, move down by factor 1/2
cut_se = c(0, .075, .15, .3, 1)
spec_figure_2 <- c("Triatoma infestans", "Triatoma dimidiata", "Triatoma gerstaeckeri",
"Rhodnius pictipes", "Panstrongylus geniculatus")
leg_pos <- list(
"Triatoma infestans" = c(x = .7, y = .1, width = .3, height = .3),
"Triatoma dimidiata" = c(x = .2, y = .1, width = .3, height = .3),
"Triatoma gerstaeckeri" = c(x = .59, y = .2, width = .25, height = .25),
"Rhodnius pictipes" = c(x = .75, y = .65, width = .2, height = .2),
"Panstrongylus geniculatus" = c(x = .75, y = .65, width = .25, height = .25)
)
folds <- readRDS("../../preprocessing/folds_list_vector_presence.Rds")[spec_figure_2]
# loop over species for figure 2
for(i in spec_figure_2) {
fold <- folds[[i]]
species_i <- fold$species
print(species_i)
tm_bv <- tm_bivar_raster(
path_pred = "predicted-maps-tgb-revision.grd",
path_ciu = "predicted-ciu-tgb-revision.grd",
path_cil = "predicted-cil-tgb-revision.grd",
fold,
palette = as.vector(t(pal)),
add_points_after = FALSE,
cut_fit = cut_fit,
cut_se = cut_se
)
pars_i <- leg_pos[[species_i]]
tm_bv_drawn <- tm_bivar_draw(tm_bv, x = pars_i["x"], y = pars_i["y"],
width = pars_i["width"], height = pars_i["height"] )
path <- paste0("figures/bivariate_maps/map_bivar_", sub(" ", "_", species_i))
png(paste0(path, ".png"), width = 600, height = 600)
print(tm_bv_drawn)
dev.off()
pdf(paste0(path, ".pdf"), width = 7, height = 7)
print(tm_bv_drawn)
dev.off()
}
################################################################################
############################### feature importance #############################
################################################################################
# Supplement figure on feature importance
cv_res <- readRDS("cv-results-final.Rds")
folds <- folds[names(cv_res)]
mean_term_contrib <- purrr::map2(
cv_res,
folds,
~ {
print(.x$species)
ndf <- env_grids %>% grid_to_df(.y$hull, .y$hull)
terms <- predict(.x$final_mod, newdata = ndf, type = "terms")
terms <- abs(terms)
terms <- terms/rowSums(terms)
# mean_term_contrib <-
colMeans(terms) * 100
}
)
nenv <- names(env_grids)
# Per-species contribution of each environmental covariate.
#
# For every species in `mean_term_contrib`, each covariate name in `nenv`
# is matched against the names of the model-term contributions; covariates
# with no matching model term get NA.
term_contrib <- purrr::map_dfr(
  mean_term_contrib,
  ~{
    # map_dfr is exported: use `::`, not `:::` (which reaches into the
    # package's internal namespace)
    purrr::map_dfr(
      nenv,
      function(var) {
        ind <- grepl(var, names(.x))
        if (all(!ind)) {
          val <- NA
        } else {
          val <- .x[ind]
        }
        data.frame(term = var, value = val)
      }
    )
  }, .id = "species")
# The Gaussian-process contribution per species is whatever share of the
# 100 % is not explained by the named covariates.
contrib_gp <- term_contrib %>%
  group_by(species) %>%
  # use TRUE, never the reassignable shorthand T
  summarize(value = 100 - sum(value, na.rm = TRUE)) %>%
  mutate(term = "GP")
term_contrib <- rbind(term_contrib, contrib_gp) %>%
mutate(term = factor(
term,
levels = c(nenv, "GP"),
labels = c("accessibility", "elevation", "slope", "water",
"evergreen needleleaf forest", "evergreen broadleaf forest",
"deciduous needleleaf forest", "diciduous broadleaf forest", "mixed forest cover",
"closed shrubland cover", "open shrubland cover", "woody savannah cover",
"savannah cover", "grassland cover", "wetland cover", "cropland cover",
"urban and built up area cover", "cropland / vegetation mosaic",
"snow and ice cover", "barren area cover", "unclassified land cover",
"no data on land cover", "daytime temperature", "nighttime temperature",
"diurnal temperature difference", "nighttime lights", "human population count",
"vegetation index", "wetness on bare soil", "surface wetness",
"rainfall", "urbanicity", "Gaussian process")))
saveRDS(term_contrib, "mean_term_contrib.Rds")
top3_per_species <- term_contrib %>%
group_by(species) %>%
arrange(desc(value), .by_group = TRUE) %>%
slice(1:3)
readr::write_csv(top3_per_species, "term_contrib_top3_per_species.csv")
# Heat map of per-species covariate contributions (in %).
heat <- ggplot(term_contrib, aes(x = term, y = species)) +
  geom_tile(aes(fill = value)) +
  scale_fill_gradientn(colors = viridis::plasma(1e3), limits = c(0, 100)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Pass the plot explicitly: ggsave() otherwise saves the last *displayed*
# plot, and `heat` is never printed in this script.
ggsave("figures/heat-map-term-contribution.pdf", plot = heat, width = 10, height = 9)
ggsave("figures/heat-map-term-contribution.png", plot = heat, width = 10, height = 9)
term_contrib_av <- term_contrib %>% group_by(term) %>%
summarize(mean_contrib = mean(value, na.rm = TRUE)) %>%
arrange(desc(mean_contrib))
readr::write_csv(term_contrib_av, "term_contrib_av.csv")
| /modeling/vector-occurrence-sdm/additional-analyses-revision.R | no_license | jgjuarez/chagas-vector-sdm | R | false | false | 13,160 | r | library(purrr)
library(tmap)
# library(stars)
library(ggplot2)
theme_set(theme_bw())
# library(tcruziutils)
devtools::load_all("../../tcruziutils")
library(dplyr)
library(parallel)
# spatial
# country polygons
countries <- readRDS("../../preprocessing/countries.Rds")
# raster brick with covariate values within endimic zone
env_grids <- raster::brick("../../preprocessing/env_grids.grd")
# island polygons to be excluded from analysis
exclude <- rgdal::readOGR("../../polygon boundaries/islands_americas.shp")
# data held out for evaluation (interpolation performance)
# used here for final model fit (but does not affect evaluation that was based
# on models fit without this data)
ev_df <- readRDS("../../preprocessing/presence_vector_evaluation.Rds")
# preprocessed data with presence/background data and spatial folds
# each element containt data for one species
folds <- readRDS("../../preprocessing/folds_list_vector_presence.Rds")
# model results ( each element contains results for one species )
# this was used for evaluation
# see cross-val-gam-tgb.Rds
cv_res <- readRDS("cv-results.Rds")
# model specifications used for analysis
settings_df <- purrr::cross_df(list(
type = c("linear", "smooth", "geospatial"),
GP = c(FALSE, TRUE)))
# remove species that errored (and category "other")
ind_keep <- map_lgl(cv_res, ~class(.x)[1] != "try-error") &
# map_lgl(folds, ~ (sum(.x$train$presence) + sum(.x$test$presence)) >= 100) &
map_lgl(folds, ~ !grepl("other", .x$species))
cv_res <- cv_res[ind_keep]
folds <- folds[ind_keep]
## smry table of best settings
settings_best <- map_dfr(
names(cv_res),
~{
print(.x)
get_spec_smry_df(cv_res, folds, settings_df, .x)
})
settings_best <- settings_best %>%
dplyr::mutate(
block_gp_ratio = block_width / gp_range,
range_gp_ratio = range / gp_range,
block_range_ratio = block_width / range)
settings_best
species_keep <- settings_best %>% pull(species)
cv_res <- cv_res[species_keep]
folds <- folds[species_keep]
## the best model is refit using all data, including data used previously for
# model validation (fold with blocks numbered 5 and random evaluation data)
pred_list <- mclapply(
seq_along(cv_res),
function(ind) {
.x <- cv_res[[ind]]
.y <- folds[[ind]]
print(.y$species)
hull <- .y$hull
# mask <- .y$hull %>% raster::crop(exclude)
ndf <- env_grids %>% grid_to_df(.y$hull, .y$hull)
ndf$species <- .y$species
bmod <- get_best_mod(.x)
form <- formula(bmod)
## refit with data from all blocks (1-5) + random hold-out for final prediction
# add block 5 to data
ndata <- rbind(.y$train, .y$test)
# add hold-out data
ndata_ev <- as_spatial(ev_df)[.y$hull, ]
ndata_ev$fold <- NA
ndata <- rbind(ndata_ev, ndata)
ndata$presence <- 0
ndata$presence <- 1L * (ndata$species == .y$species)
## readjust weights
ndata$wght <- 1
ndata$wght[ndata$presence == 0] <- sum(ndata$presence)/sum(!ndata$presence)
opt_gamma <- settings_best %>%
filter(species == .y$species) %>% pull(gamma)
bmod <- update(bmod, formula = form, data = as.data.frame(ndata),
gamma = opt_gamma)
pred <- predict(
bmod,
ndf,
type = "link",
discrete = FALSE,
se = TRUE)
ndf$prediction <- as.numeric(exp(pred$fit) / (1 + exp(pred$fit)))
ndf$ci_lower <- as.numeric(exp(pred$fit - 2 * pred$se) /
(1 + exp(pred$fit - 2 * pred$se)))
ndf$ci_upper <- as.numeric(exp(pred$fit + 2 * pred$se) /
(1 + exp(pred$fit + 2 * pred$se)))
nas <- rowSums(is.na(ndf[, c("prediction", "ci_lower", "ci_upper")])) > 0
if( sum(nas) > 0) {
ndf[nas,] <- NA
}
ndf <- ndf %>% filter(!is.na(prediction))
grid_prediction <- df_to_grid(ndf, env_grids[[1]], column = "prediction") %>%
raster::mask(exclude, inverse = TRUE)
grid_cilower <- df_to_grid(ndf, env_grids[[1]], column = "ci_lower") %>%
raster::mask(exclude, inverse = TRUE)
grid_ciupper <- df_to_grid(ndf, env_grids[[1]], column = "ci_upper") %>%
raster::mask(exclude, inverse = TRUE)
names(grid_prediction) <- names(grid_cilower) <- names(grid_ciupper) <-
.y$species
gc()
list(
species = .y$species,
final_mod = bmod,
prediction = grid_prediction,
ci_lower = grid_cilower,
ci_upper = grid_ciupper)
}, mc.cores = 5)
# store the final model within cv_res object
for(i in seq_along(pred_list)) {
cv_res[[pred_list[[i]]$species]]$final_mod <- pred_list[[i]]$final_mod
}
saveRDS(cv_res, "cv-results-final.Rds")
# combine raster for different species into raster brick
pred_brick <- pred_list %>% map(~.x$prediction) %>% reduce(raster::addLayer)
cil_brick <- pred_list %>% map(~.x$ci_lower) %>% reduce(raster::addLayer)
ciu_brick <- pred_list %>% map(~.x$ci_upper) %>% reduce(raster::addLayer)
# these are the final maps available online
raster::brick(
pred_brick,
filename = "predicted-maps-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
raster::brick(
cil_brick,
filename = "predicted-cil-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
raster::brick(
ciu_brick,
filename = "predicted-ciu-tgb-revision.grd",
bylayer = FALSE,
format = "raster",
overwrite = TRUE)
# save predicted maps (as pdf and png)
for (ind in seq_along(cv_res)) {
.x <- cv_res[[ind]]
.y <- folds[[ind]]
print(.y$species)
hull <- .y$hull
spec <- sub(" ", ".", .y$species)
# mask <- .y$hull %>% raster::crop(exclude)
shp <- countries %>% raster::crop(raster::extent(hull))
pred_grid <- raster::brick("predicted-maps-tgb-revision.grd")[[spec]] %>%
raster::crop(raster::extent(hull))
pred_map <- tm_shape(shp) +
tm_borders(alpha = .8) +
tm_shape(pred_grid) +
tm_raster(
alpha = .7, style = "cont", palette = viridis::magma(1e3),
breaks = seq(0, 1, by = .2), title = "prediction") +
tm_layout(
title = .y$species,
title.size = 2.5,
title.fontface = "italic",
legend.position = c("left", "bottom"),
legend.text.size = 1.2,
legend.hist.size = 1.2,
legend.title.size = 1.5)
path <- paste0("figures/final-predictions/pred-map-final-",
sub(" ", "-", .y$species), ".pdf")
tmap_save(pred_map, filename = path, width = 7, height = 7)
tmap_save(pred_map,
filename = paste0("figures/final-predictions/pred-map-final-",
sub(" ", "-", .y$species), ".png"), width = 7, height = 7)
}
################################################################################
############################# Bivariate Maps ###################################
################################################################################
# color palette
pal <- matrix(pals::arc.bluepink(), nrow = 4, ncol = 4)
# use some sensible cut offs for prediction
# .5 is baseline because presence/background are reweighted to have equal weights
# thus, probs above indicate presence liklier than absence.
cut_fit = c(0, .25, .5, .75, 1)
# SE of >=.3 implies that lower and upper CI are in different categories of the
# prediction as defined above, then go down in steps of 1/2
cut_se = c(0, .075, .15, .3, 1)
# loop over species and create bivariate maps
for(i in seq_along(cv_res)) {
cv <- cv_res[[i]]
species_i <- cv$species
print(species_i)
fold <- folds[[species_i]]
tm_bv <- tm_bivar_raster(
path_pred = "predicted-maps-tgb-revision.grd",
path_ciu = "predicted-ciu-tgb-revision.grd",
path_cil = "predicted-cil-tgb-revision.grd",
fold,
palette = as.vector(t(pal)),
add_points_after = FALSE,
cut_fit = cut_fit,
cut_se = cut_se
)
tm_bv_drawn <- tm_bivar_draw(tm_bv, x = .75, y = .1, width = .2, height = .2
)
path <- paste0("figures/bivariate_maps/map_bivar_", sub(" ", "_", species_i))
png(paste0(path, ".png"), width = 600, height = 600)
print(tm_bv_drawn)
dev.off()
pdf(paste0(path, ".pdf"), width = 7, height = 7)
print(tm_bv_drawn)
dev.off()
}
### bivar maps for figure 2 with adjusted legend positions
# color palette
pal <- matrix(pals::arc.bluepink(), nrow = 4, ncol = 4)
# use some sensible cut offs for prediction
# .5 is baseline because presence/TGB absence are reweighted to have equal weights
# in sum. probs above indicate presence liklier than absence.
cut_fit = c(0, .25, .5, .75, 1)
# ci width of .3 indicates that upper and lower CI will not be in same category
# of predicted probabilities as defined above, move down by factor 1/2
cut_se = c(0, .075, .15, .3, 1)
spec_figure_2 <- c("Triatoma infestans", "Triatoma dimidiata", "Triatoma gerstaeckeri",
"Rhodnius pictipes", "Panstrongylus geniculatus")
leg_pos <- list(
"Triatoma infestans" = c(x = .7, y = .1, width = .3, height = .3),
"Triatoma dimidiata" = c(x = .2, y = .1, width = .3, height = .3),
"Triatoma gerstaeckeri" = c(x = .59, y = .2, width = .25, height = .25),
"Rhodnius pictipes" = c(x = .75, y = .65, width = .2, height = .2),
"Panstrongylus geniculatus" = c(x = .75, y = .65, width = .25, height = .25)
)
folds <- readRDS("../../preprocessing/folds_list_vector_presence.Rds")[spec_figure_2]
# loop over species for figure 2
for(i in spec_figure_2) {
fold <- folds[[i]]
species_i <- fold$species
print(species_i)
tm_bv <- tm_bivar_raster(
path_pred = "predicted-maps-tgb-revision.grd",
path_ciu = "predicted-ciu-tgb-revision.grd",
path_cil = "predicted-cil-tgb-revision.grd",
fold,
palette = as.vector(t(pal)),
add_points_after = FALSE,
cut_fit = cut_fit,
cut_se = cut_se
)
pars_i <- leg_pos[[species_i]]
tm_bv_drawn <- tm_bivar_draw(tm_bv, x = pars_i["x"], y = pars_i["y"],
width = pars_i["width"], height = pars_i["height"] )
path <- paste0("figures/bivariate_maps/map_bivar_", sub(" ", "_", species_i))
png(paste0(path, ".png"), width = 600, height = 600)
print(tm_bv_drawn)
dev.off()
pdf(paste0(path, ".pdf"), width = 7, height = 7)
print(tm_bv_drawn)
dev.off()
}
################################################################################
############################### feature importance #############################
################################################################################
# Supplement figure on feature importance
cv_res <- readRDS("cv-results-final.Rds")
folds <- folds[names(cv_res)]
mean_term_contrib <- purrr::map2(
cv_res,
folds,
~ {
print(.x$species)
ndf <- env_grids %>% grid_to_df(.y$hull, .y$hull)
terms <- predict(.x$final_mod, newdata = ndf, type = "terms")
terms <- abs(terms)
terms <- terms/rowSums(terms)
# mean_term_contrib <-
colMeans(terms) * 100
}
)
nenv <- names(env_grids)
term_contrib <- purrr::map_dfr(
mean_term_contrib,
~{
out_df <- purrr:::map_dfr(
nenv,
function(var) {
ind <- grepl(var, names(.x))
if(all(!ind)) {
val <- NA
} else {
val <- .x[ind]
}
data.frame(term = var, value = val)
}
)
}, .id = "species")
contrib_gp <- term_contrib %>%
group_by(species) %>%
summarize(value = 100 - sum(value,na.rm=T)) %>%
mutate(term = "GP")
term_contrib <- rbind(term_contrib, contrib_gp) %>%
mutate(term = factor(
term,
levels = c(nenv, "GP"),
labels = c("accessibility", "elevation", "slope", "water",
"evergreen needleleaf forest", "evergreen broadleaf forest",
"deciduous needleleaf forest", "diciduous broadleaf forest", "mixed forest cover",
"closed shrubland cover", "open shrubland cover", "woody savannah cover",
"savannah cover", "grassland cover", "wetland cover", "cropland cover",
"urban and built up area cover", "cropland / vegetation mosaic",
"snow and ice cover", "barren area cover", "unclassified land cover",
"no data on land cover", "daytime temperature", "nighttime temperature",
"diurnal temperature difference", "nighttime lights", "human population count",
"vegetation index", "wetness on bare soil", "surface wetness",
"rainfall", "urbanicity", "Gaussian process")))
saveRDS(term_contrib, "mean_term_contrib.Rds")
top3_per_species <- term_contrib %>%
group_by(species) %>%
arrange(desc(value), .by_group = TRUE) %>%
slice(1:3)
readr::write_csv(top3_per_species, "term_contrib_top3_per_species.csv")
heat <- ggplot(term_contrib, aes(x = term, y = species)) +
geom_tile(aes(fill = value)) +
scale_fill_gradientn(colors = viridis::plasma(1e3), limits = c(0, 100)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
ggsave("figures/heat-map-term-contribution.pdf", width = 10, height = 9)
ggsave("figures/heat-map-term-contribution.png", width = 10, height = 9)
term_contrib_av <- term_contrib %>% group_by(term) %>%
summarize(mean_contrib = mean(value, na.rm = TRUE)) %>%
arrange(desc(mean_contrib))
readr::write_csv(term_contrib_av, "term_contrib_av.csv")
|
#import libs
library(ggplot2)
library(readr)
library(fma)
library(reshape2)
library(lubridate)
library(plotly)
library(magrittr)
library(zoo)
library(xts)
library(forecast)
library(TSstudio)
library(tseries)
library(aTSA)
library(ggsci)
library(ggpubr)
library(Rssa)
####import data####
incoming <- read.csv("~/Documents/GitHub/jus_incoming/Incoming.csv", sep=";")
class(incoming)
incoming <- incoming[2]
View(incoming)
head(incoming)
colnames(incoming)
class(incoming)
incoming_ts <- ts(incoming, start=c(2007, 1), end=c(2017,12), frequency = 12)
start(incoming_ts)
end(incoming_ts)
print(incoming_ts)
####Plotting####
# Time-series plot of monthly incoming proceedings with a mean reference line.
text1 <- "Average Number of Proceedings"
par(mfrow=c(1,1))
par(cex.axis=0.9)
# NOTE(review): axis() is called before any plot device is open, and
# seq(from = 2007, by = 0.25) lacks a `to`/`length.out`, so this call
# likely errors - move it after plot.ts() and give seq() an end point.
axis(1, at = seq(from=2007, by = 0.25))
inc_plot <-plot.ts(incoming_ts, main="Judicial Proceedings from 2007 to 2017",
xlab="Date", ylab="Incoming Proceedings", type="l", col='black', axes = TRUE, )
# horizontal reference line at the overall series mean
abline(h=mean(incoming_ts), col='yellow', lwd='2')
legend("bottomright", inset = .05, text1,col='yellow',lty=1,ncol=1,cex=0.5,lwd=2.5)
####Seasonality Ana Decomposition####
#Analysing the variation by year, month, and monthly boxplots
ts_seasonal(incoming_ts, type = 'box')
#Other tests & all
ts_seasonal(incoming_ts, type = 'normal')
ts_seasonal(incoming_ts, type = 'cycle')
ts_seasonal(incoming_ts, type = 'all')
#Heatmap
ts_heatmap(incoming_ts)
#Surface plot
ts_surface(incoming_ts)
ts_lags(incoming_ts, n_row = 6, lag.max = 36)
decompose(incoming_ts)
ts_decompose(incoming_ts, showline = T )
decompose(incoming_ts)
lambdaincoming<-BoxCox.lambda(incoming_ts)
# as box cox is <> 0,we're going to apply a box cox transformation to log data.
plot(diff(BoxCox(incoming_ts, lambdaincoming)))
acf(diff(BoxCox(incoming_ts, lambdaincoming)))
pacf(diff(BoxCox(incoming_ts, lambdaincoming)))
arima011011<-arima(x=incoming_ts,order = c(0,1,1), seasonal = c(0,1,1))
auto.arima(incoming_ts, stepwise = FALSE, trace = TRUE, max.order = 4)
#Test set vs train set
datared <- window(incoming_ts, start=c(2007,1), end=c(2017,12))
h_out <- 24
split_incomingTS <- ts_split(incoming_ts, sample.out = h_out)
length(train)
length(test)
train <- split_incomingTS$train
train
test <- split_incomingTS$test
test
snaive(incoming_ts, h=6)
model1 <- forecast::forecast(auto.arima(train), h=12)
model2 <- forecast::forecast(arima(train, order = c(0,1,1), seasonal = c(0,1,1)), h=24)
model2_diag <- arima(x = incoming_ts, order = c(0,1,1), seasonal = c(0,1,1))
plot.ts(incoming_ts)
lines(incoming_ts-model2$residuals, col='red')
check_res(model2)
par(mfrow=c(1,1))
axis(2, seq(0, 40000, 10000), las=2)
plot(model2, fcol = "#1E4ABB" ) ##shadecols = c('#BEE5A5', '#76C3AC'))
#Residuals Diagnosis
plot(model2$residuals)
tsdiag(model2_diag)
model3 <- Arima(train, order = c(0,0,1), seasonal = c(0,0,1))
model3_fc <- forecast(model3, lead = h_out)
test_forecast(actual = incoming_ts, forecast.obj = model2, test = test)
teste1<-auto.arima(incoming_ts)
teste1$residuals
teste1$sigma2
teste1$aic
##Aplication of SSA
incoming_ts<-zooreg(1:132, frequency = 12, start = c(2007, 1))
class(co2)
class(incoming)
decomp_ssa <- ssa(incoming_ts)
decomp_ssa$window
decomp_ssa$length
summary(decomp_ssa)
plot(decomp_ssa, type = "values")
plot(decomp_ssa, type = "vectors") # Eigenvectors
plot(decomp_ssa, type = "paired") # Pairs of eigenvectors
plot(wcor(decomp_ssa)) # w-correlation matrix plot
rec_ssa <-reconstruct(decomp_ssa, groups)
| /script.R | no_license | sssusana/jus_incoming | R | false | false | 3,456 | r | #import libs
library(ggplot2)
library(readr)
library(fma)
library(reshape2)
library(lubridate)
library(plotly)
library(magrittr)
library(zoo)
library(xts)
library(forecast)
library(TSstudio)
library(tseries)
library(aTSA)
library(ggsci)
library(ggpubr)
library(Rssa)
####import data####
# Monthly counts of incoming judicial proceedings; only column 2 is kept.
incoming <- read.csv("~/Documents/GitHub/jus_incoming/Incoming.csv", sep=";")
class(incoming)
incoming <- incoming[2]
View(incoming)
head(incoming)
colnames(incoming)
class(incoming)
# Monthly time series, Jan 2007 - Dec 2017 (132 observations)
incoming_ts <- ts(incoming, start=c(2007, 1), end=c(2017,12), frequency = 12)
start(incoming_ts)
end(incoming_ts)
print(incoming_ts)
####Plotting####
text1 <- "Average Number of Proceedings"
par(mfrow=c(1,1))
par(cex.axis=0.9)
# NOTE(review): axis() is called before any plot exists, so this line errors
# ("plot.new has not been called yet"); it presumably belongs after plot.ts().
axis(1, at = seq(from=2007, by = 0.25))
inc_plot <-plot.ts(incoming_ts, main="Judicial Proceedings from 2007 to 2017",
xlab="Date", ylab="Incoming Proceedings", type="l", col='black', axes = TRUE, )
# horizontal reference line at the series mean
abline(h=mean(incoming_ts), col='yellow', lwd='2')
legend("bottomright", inset = .05, text1,col='yellow',lty=1,ncol=1,cex=0.5,lwd=2.5)
####Seasonality Analysis and Decomposition####
#Analysing the variation by year, month, and monthly boxplots
ts_seasonal(incoming_ts, type = 'box')
#Other tests & all
ts_seasonal(incoming_ts, type = 'normal')
ts_seasonal(incoming_ts, type = 'cycle')
ts_seasonal(incoming_ts, type = 'all')
#Heatmap
ts_heatmap(incoming_ts)
#Surface plot
ts_surface(incoming_ts)
ts_lags(incoming_ts, n_row = 6, lag.max = 36)
decompose(incoming_ts)
ts_decompose(incoming_ts, showline = T )
decompose(incoming_ts)
# Box-Cox transformation parameter estimated from the series
lambdaincoming<-BoxCox.lambda(incoming_ts)
# as box cox is <> 0,we're going to apply a box cox transformation to log data.
plot(diff(BoxCox(incoming_ts, lambdaincoming)))
acf(diff(BoxCox(incoming_ts, lambdaincoming)))
pacf(diff(BoxCox(incoming_ts, lambdaincoming)))
# Candidate SARIMA(0,1,1)(0,1,1)[12] fit, plus an exhaustive auto.arima search
arima011011<-arima(x=incoming_ts,order = c(0,1,1), seasonal = c(0,1,1))
auto.arima(incoming_ts, stepwise = FALSE, trace = TRUE, max.order = 4)
#Test set vs train set
datared <- window(incoming_ts, start=c(2007,1), end=c(2017,12))
h_out <- 24
# Hold out the last h_out months for testing
split_incomingTS <- ts_split(incoming_ts, sample.out = h_out)
# NOTE(review): 'train' and 'test' are used here before they are assigned two
# lines below -- these length() calls fail unless the script is run out of order.
length(train)
length(test)
train <- split_incomingTS$train
train
test <- split_incomingTS$test
test
snaive(incoming_ts, h=6)
model1 <- forecast::forecast(auto.arima(train), h=12)
model2 <- forecast::forecast(arima(train, order = c(0,1,1), seasonal = c(0,1,1)), h=24)
model2_diag <- arima(x = incoming_ts, order = c(0,1,1), seasonal = c(0,1,1))
plot.ts(incoming_ts)
# Fitted values = observed minus residuals, overlaid in red
lines(incoming_ts-model2$residuals, col='red')
check_res(model2)
par(mfrow=c(1,1))
axis(2, seq(0, 40000, 10000), las=2)
plot(model2, fcol = "#1E4ABB" ) ##shadecols = c('#BEE5A5', '#76C3AC'))
#Residuals Diagnosis
plot(model2$residuals)
tsdiag(model2_diag)
model3 <- Arima(train, order = c(0,0,1), seasonal = c(0,0,1))
# NOTE(review): 'lead' is aTSA::forecast's horizon argument -- this presumably
# resolves to aTSA (attached after forecast); forecast::forecast takes 'h'. Verify.
model3_fc <- forecast(model3, lead = h_out)
test_forecast(actual = incoming_ts, forecast.obj = model2, test = test)
teste1<-auto.arima(incoming_ts)
teste1$residuals
teste1$sigma2
teste1$aic
##Application of SSA (singular spectrum analysis)
# NOTE(review): this overwrites the real data with a synthetic 1:132 index
# series, so the SSA below decomposes the index, not the proceedings counts.
incoming_ts<-zooreg(1:132, frequency = 12, start = c(2007, 1))
class(co2)
class(incoming)
decomp_ssa <- ssa(incoming_ts)
decomp_ssa$window
decomp_ssa$length
summary(decomp_ssa)
plot(decomp_ssa, type = "values")
plot(decomp_ssa, type = "vectors") # Eigenvectors
plot(decomp_ssa, type = "paired") # Pairs of eigenvectors
plot(wcor(decomp_ssa)) # w-correlation matrix plot
# NOTE(review): 'groups' is never defined in this script; reconstruct() will fail.
rec_ssa <-reconstruct(decomp_ssa, groups)
|
#geom_boxplot does not inherently use the 95% confidence interval as it's whiskers
#we would like to force it to by using a stat_summary modification
#to do this we write a function to feed into stat_summary, specifying the output as a boxplot
#ToothGrowth is a built in data set
quantiles_95 <- function(x) {
  # Build the five summary points geom_boxplot expects, with the whiskers at
  # the 5th/95th percentiles instead of the default 1.5*IQR rule.
  # 0.25/0.5/0.75 give the box and the median line; 0.05/0.95 give the whiskers.
  probs <- c(0.05, 0.25, 0.5, 0.75, 0.95)
  stats <- quantile(x, probs = probs)
  # stat_summary(geom = "boxplot") looks the aesthetics up by these names.
  setNames(stats, c("ymin", "lower", "middle", "upper", "ymax"))
}
#just to see the output of the function
quantiles_95(ToothGrowth$len) #we input ToothGrowth$len as x because that's what we are plotting on the y
#ggplot is smart enough to split this by our x value but we could do it ourselves
quantiles_95(ToothGrowth$len[ToothGrowth$supp=="OJ"])
quantiles_95(ToothGrowth$len[ToothGrowth$supp=="VC"])
#now compare the two plots one generated by geom_boxplot and one generated by our function
# NOTE(review): this script calls ggplot()/grid.arrange() but never loads
# ggplot2 or gridExtra -- it assumes both are already attached.
# NOTE(review): 'c' shadows base::c below; consider renaming if the script grows.
g<-ggplot(ToothGrowth, aes(x=supp, y=len))+
geom_boxplot()+
ggtitle("built in")+
coord_cartesian(ylim=c(0,35)) #to make sure the two plots are on the same scale
c<-ggplot(ToothGrowth, aes(x=supp, y=len))+
stat_summary(fun.data = quantiles_95, geom="boxplot")+
ggtitle("our function")+
coord_cartesian(ylim=c(0,35)) #to make sure the two plots are on the same scale
grid.arrange(g,c, ncol=2)
| /ggplot_boxplot_95CI.R | no_license | tarynam/random-code | R | false | false | 1,585 | r | #geom_boxplot does not inherently use the 95% confidence interval as it's whiskers
#we would like to force it to by using a stat_summary modification
#to do this we write a function to feed into stat_summary, specifying the output as a boxplot
#ToothGrowth is a built in data set
quantiles_95 <- function(x) {
  # Build the five summary points geom_boxplot expects, with the whiskers at
  # the 5th/95th percentiles instead of the default 1.5*IQR rule.
  # 0.25/0.5/0.75 give the box and the median line; 0.05/0.95 give the whiskers.
  probs <- c(0.05, 0.25, 0.5, 0.75, 0.95)
  stats <- quantile(x, probs = probs)
  # stat_summary(geom = "boxplot") looks the aesthetics up by these names.
  setNames(stats, c("ymin", "lower", "middle", "upper", "ymax"))
}
#just to see the output of the function
quantiles_95(ToothGrowth$len) #we input ToothGrowth$len as x because that's what we are plotting on the y
#ggplot is smart enough to split this by our x value but we could do it ourselves
quantiles_95(ToothGrowth$len[ToothGrowth$supp=="OJ"])
quantiles_95(ToothGrowth$len[ToothGrowth$supp=="VC"])
#now compare the two plots one generated by geom_boxplot and one generated by our function
# NOTE(review): this script calls ggplot()/grid.arrange() but never loads
# ggplot2 or gridExtra -- it assumes both are already attached.
# NOTE(review): 'c' shadows base::c below; consider renaming if the script grows.
g<-ggplot(ToothGrowth, aes(x=supp, y=len))+
geom_boxplot()+
ggtitle("built in")+
coord_cartesian(ylim=c(0,35)) #to make sure the two plots are on the same scale
c<-ggplot(ToothGrowth, aes(x=supp, y=len))+
stat_summary(fun.data = quantiles_95, geom="boxplot")+
ggtitle("our function")+
coord_cartesian(ylim=c(0,35)) #to make sure the two plots are on the same scale
grid.arrange(g,c, ncol=2)
|
#########################################
# Downloading processed datasets into R #
#########################################
library(readr) # to read stata dta files
library(haven) # to read sas files
# Loading data ------------------------------------------------------------
# charitable data
filename.charitable <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Charitable/ProcessedData/charitable_withdummyvariables.csv"
charitable <- read_csv(url(filename.charitable))
# mobilization data (zipped CSV: download to a temp file, read, clean up)
temp <- tempfile()
download.file("https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Mobilization/ProcessedData/mobilization_with_unlisted.zip",temp)
mobilization <- read_csv(unz(temp, "mobilization_with_unlisted.csv"))
unlink(temp)
# secrecy data
filename.secrecy <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Secrecy/ProcessedData/ct_ballotsecrecy_processed.csv"
secrecy <- read_csv(url(filename.secrecy))
# social data
filename.social <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Social/ProcessedData/socialpressnofact.csv"
social <- read_csv(url(filename.social))
# welfare data
filename.welfare <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Welfare/ProcessedData/welfarenolabel3.csv"
welfare <- read_csv(url(filename.welfare))
# IV data
# NOTE(review): relative path -- assumes the working directory is the project
# root that contains "IV Datasets/".
filename.IV <- "IV Datasets/RawData/NEW7080.dta"
IVdataset <- read_dta(filename.IV)
# Census extract: replace the generic column names with the paper's names
colnames(IVdataset) <- c("AGE", "AGEQ", "v3", "EDUC", "ENOCENT", "ESOCENT", "v7", "v8",
"LWKLYWGE", "MARRIED", "MIDATL", "MT", "NEWENG", "v14", "v15",
"CENSUS", "v17", "QOB", "RACE", "SMSA", "SOATL", "v22", "v23",
"WNOCENT", "WSOCENT", "v26", "YOB")
# vouchers data
# NOTE(review): haven::read_sas() takes a path/URL string; wrapping it in
# url() hands it a connection object -- confirm this actually works.
filename.vouchers <- "http://economics.mit.edu/files/1393"
vouchers <- read_sas(url(filename.vouchers))
# Saving data -------------------------------------------------------------
# Write all seven datasets into the package's data/ directory
usethis::use_data(charitable, mobilization, secrecy,
social, welfare, IVdataset, vouchers,
overwrite = TRUE)
| /data-raw/downloadData.R | permissive | lnsongxf/experimentdatar | R | false | false | 2,098 | r | #########################################
# Downloading processed datasets into R #
#########################################
library(readr) # to read stata dta files
library(haven) # to read sas files
# Loading data ------------------------------------------------------------
# charitable data
filename.charitable <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Charitable/ProcessedData/charitable_withdummyvariables.csv"
charitable <- read_csv(url(filename.charitable))
# mobilization data (zipped CSV: download to a temp file, read, clean up)
temp <- tempfile()
download.file("https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Mobilization/ProcessedData/mobilization_with_unlisted.zip",temp)
mobilization <- read_csv(unz(temp, "mobilization_with_unlisted.csv"))
unlink(temp)
# secrecy data
filename.secrecy <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Secrecy/ProcessedData/ct_ballotsecrecy_processed.csv"
secrecy <- read_csv(url(filename.secrecy))
# social data
filename.social <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Social/ProcessedData/socialpressnofact.csv"
social <- read_csv(url(filename.social))
# welfare data
filename.welfare <- "https://raw.githubusercontent.com/gsbDBI/ExperimentData/master/Welfare/ProcessedData/welfarenolabel3.csv"
welfare <- read_csv(url(filename.welfare))
# IV data
# NOTE(review): relative path -- assumes the working directory is the project
# root that contains "IV Datasets/".
filename.IV <- "IV Datasets/RawData/NEW7080.dta"
IVdataset <- read_dta(filename.IV)
# Census extract: replace the generic column names with the paper's names
colnames(IVdataset) <- c("AGE", "AGEQ", "v3", "EDUC", "ENOCENT", "ESOCENT", "v7", "v8",
"LWKLYWGE", "MARRIED", "MIDATL", "MT", "NEWENG", "v14", "v15",
"CENSUS", "v17", "QOB", "RACE", "SMSA", "SOATL", "v22", "v23",
"WNOCENT", "WSOCENT", "v26", "YOB")
# vouchers data
# NOTE(review): haven::read_sas() takes a path/URL string; wrapping it in
# url() hands it a connection object -- confirm this actually works.
filename.vouchers <- "http://economics.mit.edu/files/1393"
vouchers <- read_sas(url(filename.vouchers))
# Saving data -------------------------------------------------------------
# Write all seven datasets into the package's data/ directory
usethis::use_data(charitable, mobilization, secrecy,
social, welfare, IVdataset, vouchers,
overwrite = TRUE)
|
with(a45725d8cc78f453b9fe41a4c2617580d, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';FRAME878836[,(c('report_run_end_date','modified_date','created_date','report_run_date')) := lapply(.SD, function(x) as.Date(x, format='%Y-%m-%d')), .SDcols = c('report_run_end_date','modified_date','created_date','report_run_date')]}); | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/aXslbbYajIKjl.R | no_license | ayanmanna8/test | R | false | false | 431 | r | with(a45725d8cc78f453b9fe41a4c2617580d, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';FRAME878836[,(c('report_run_end_date','modified_date','created_date','report_run_date')) := lapply(.SD, function(x) as.Date(x, format='%Y-%m-%d')), .SDcols = c('report_run_end_date','modified_date','created_date','report_run_date')]}); |
leveneFast <- function(dat, groups) {
require(matrixStats)
N = length(groups)
k = length(unique(groups))
gIndexes = split(seq(along=groups),groups)
gLens = as.numeric(sapply(gIndexes,length))
zij = dat
for(i in seq(along=gIndexes)) {
ind = gIndexes[[i]]
zij[,ind] = abs(dat[,ind] - rowMedians(dat[,ind]))
}
zdotdot = rowMeans(zij)
zidot = sapply(gIndexes, function(x) rowMeans(zij[,x]))
num = rowSums(gLens*(zidot-zdotdot)^2)
out = zidot
for(i in seq(along=gIndexes)) {
ind = gIndexes[[i]]
out[,i] = rowSums((zij[,ind] - zidot[,i])^2)
}
denom = rowSums(out)
W = ((N-k)/(k-1))*(num/denom)
pval = pf(W, df1 = k-1, df2 = N-k, lower.tail = FALSE)
return(data.frame(cbind(W,pval)))
}
| /case_control/leveneFast.R | no_license | LieberInstitute/zandiHyde_bipolar_rnaseq | R | false | false | 753 | r |
leveneFast <- function(dat, groups) {
require(matrixStats)
N = length(groups)
k = length(unique(groups))
gIndexes = split(seq(along=groups),groups)
gLens = as.numeric(sapply(gIndexes,length))
zij = dat
for(i in seq(along=gIndexes)) {
ind = gIndexes[[i]]
zij[,ind] = abs(dat[,ind] - rowMedians(dat[,ind]))
}
zdotdot = rowMeans(zij)
zidot = sapply(gIndexes, function(x) rowMeans(zij[,x]))
num = rowSums(gLens*(zidot-zdotdot)^2)
out = zidot
for(i in seq(along=gIndexes)) {
ind = gIndexes[[i]]
out[,i] = rowSums((zij[,ind] - zidot[,i])^2)
}
denom = rowSums(out)
W = ((N-k)/(k-1))*(num/denom)
pval = pf(W, df1 = k-1, df2 = N-k, lower.tail = FALSE)
return(data.frame(cbind(W,pval)))
}
|
sim_data_generation <- function(params, ...){
# Draw m effect sizes from N(0, sd = sqrt(0.50 - var_inter)).
# NOTE(review): reads `m` and `var_inter` from the calling environment and
# ignores `params`/`...` -- presumably a simulation-framework hook whose
# signature is imposed by the caller; confirm the intended contract.
betas <- rnorm(m, 0, sqrt((0.50 - var_inter)))
tibble(betas = betas)
} | /sim_data_generation.R | no_license | katiecoburn/effect_size_proj | R | false | false | 128 | r | sim_data_generation <- function(params, ...){
betas <- rnorm(m, 0, sqrt((0.50 - var_inter)))
tibble(betas = betas)
} |
# RTutor hook: identifies this package and the problem sets it ships.
# Returns a list with components `package` (the package name) and
# `ps` (character vector of problem-set names).
ps.pkg.info <- function() {
  # Name of this package
  package <- "RTutorAirports"
  # Name of problem sets in the package
  ps <- c("Airports")
  list(package = package, ps = ps)
}
| /R/package_info.r | no_license | kst92/RTutorAirports | R | false | false | 183 | r | ps.pkg.info = function() {
# Name of this package
package = "RTutorAirports"
# Name of problem sets in the package
ps = c("Airports")
list(package=package, ps = ps)
}
|
\name{TkPredict}
\alias{TkPredict}
\alias{Predict.Plot}
%- Also NEED an '\alias' for EACH other topic documented here.
%- cp slider2.Rd /home/wiwi/pwolf/work/work.rtrevive/install.dir/rwined/man/slider.Rd
\title{Plot predicted values from a model against one of the predictors
for a given value of the other predictors}
\description{
These functions create a plot of predicted values vs. one of the
predictors for given values of the other predictors. TkPredict
further creates a Tk gui to allow you to change the values of the
other predictors.
}
\usage{
Predict.Plot(model, pred.var, ..., type='response', add=FALSE,
plot.args=list(), n.points=100, ref.val, ref.col='green', ref.lty=1,
data)
TkPredict(model, data, pred.var, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{model}{A model of class 'lm' or 'glm' (or possibly others) from
which to plot predictions.}
\item{pred.var}{A character string indicating which predictor variable
to put on the x-axis of the plot.}
\item{...}{for \code{Predict.Plot} The predictor variables and their
values for the predictions. See below for detail.}
\item{type}{The type value passed on to the predict function.}
\item{add}{Whether to add a line to the existing plot or start a new
plot.}
\item{plot.args}{A list of additional options passed on to the
plotting function.}
\item{n.points}{The number of points to use in the approximation of
the curve.}
\item{ref.val}{A reference value for the \code{pred.var}, a reference
line will be drawn at this value to the corresponding predicted value.}
\item{ref.col, ref.lty}{The color and line type of the reference line
if plotted.}
\item{data}{The data frame or environment where the variables that the
model was fit to are found. If missing, the model will be examined
for an attempt find the needed data.}
}
\details{
These functions plot the predicted values from a regression model
(\code{lm} or \code{glm}) against one of the predictor variables for
given values of the other predictors. The values of the other
predictors are passed as the \code{...} argument to
\code{Predict.Plot} or are set using gui controls in \code{TkPredict}
(initial values are the medians).
If the variable for the x axis (name put in \code{pred.var}) is not
included with the \code{...} variables, then the range will be
computed from the \code{data} argument or the data component of the
\code{model} argument.
If the variable passed as \code{pred.var} is also included in the
\code{...} arguments and contains a single value, then this value will
be used as the \code{ref.val} argument.
If it contains 2 or more values, then the range of these values will
be used as the x-limits for the predictions.
When running \code{TkPredict} you can click on the "Print Call" button
to print out the call of \code{Predict.Plot} that will recreate the
same plot. Doing this for different combinations of predictor values
and editing the \code{plot.args} and \code{add} arguments will give
you a script that will create a static version of the predictions.
}
\value{
These functions are run for their side effects of creating plots and
do not return anything.
}
\author{Greg Snow, \email{538280@gmail.com}}
\seealso{ \code{tkrplot}, \code{\link{tkexamp}}, \code{\link{predict}} }
\note{ The GUI currently allows you to select a factor as the
x-variable. If you do this it will generate some errors and you will
not see the plot, just choose a different variable as the x-variable
and the plot will return. }
\examples{
library(splines)
fit.lm1 <- lm( Sepal.Width ~ ns(Petal.Width,3)*ns(Petal.Length,3)+Species,
data=iris)
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.3, Species = "versicolor",
plot.args = list(ylim=range(iris$Sepal.Width), col='blue'),
type = "response")
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.3, Species = "virginica",
plot.args = list(col='red'),
type = "response", add=TRUE)
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.4, Species = "virginica",
plot.args = list(col='purple'),
type = "response", add=TRUE)
fit.glm1 <- glm( Species=='virginica' ~ Sepal.Width+Sepal.Length,
data=iris, family=binomial)
Predict.Plot(fit.glm1, pred.var = "Sepal.Length", Sepal.Width = 1.99,
Sepal.Length = 6.34, plot.args = list(ylim=c(0,1), col='blue'),
type = "response")
Predict.Plot(fit.glm1, pred.var = "Sepal.Length", Sepal.Width = 4.39,
Sepal.Length = 6.34, plot.args = list(col='red'),
type = "response", add=TRUE)
if(interactive()){
TkPredict(fit.lm1)
TkPredict(fit.glm1)
}
}
\keyword{dynamic}% at least one, from doc/KEYWORDS
\keyword{iplot}% __ONLY ONE__ keyword per line
\keyword{regression}
| /man/TkPredict.Rd | no_license | glsnow/TeachingDemos | R | false | false | 4,905 | rd | \name{TkPredict}
\alias{TkPredict}
\alias{Predict.Plot}
%- Also NEED an '\alias' for EACH other topic documented here.
%- cp slider2.Rd /home/wiwi/pwolf/work/work.rtrevive/install.dir/rwined/man/slider.Rd
\title{Plot predicted values from a model against one of the predictors
for a given value of the other predictors}
\description{
These functions create a plot of predicted values vs. one of the
predictors for given values of the other predictors. TkPredict
further creates a Tk gui to allow you to change the values of the
other predictors.
}
\usage{
Predict.Plot(model, pred.var, ..., type='response', add=FALSE,
plot.args=list(), n.points=100, ref.val, ref.col='green', ref.lty=1,
data)
TkPredict(model, data, pred.var, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{model}{A model of class 'lm' or 'glm' (or possibly others) from
which to plot predictions.}
\item{pred.var}{A character string indicating which predictor variable
to put on the x-axis of the plot.}
\item{...}{for \code{Predict.Plot} The predictor variables and their
values for the predictions. See below for detail.}
\item{type}{The type value passed on to the predict function.}
\item{add}{Whether to add a line to the existing plot or start a new
plot.}
\item{plot.args}{A list of additional options passed on to the
plotting function.}
\item{n.points}{The number of points to use in the approximation of
the curve.}
\item{ref.val}{A reference value for the \code{pred.var}, a reference
line will be drawn at this value to the corresponding predicted value.}
\item{ref.col, ref.lty}{The color and line type of the reference line
if plotted.}
\item{data}{The data frame or environment where the variables that the
model was fit to are found. If missing, the model will be examined
for an attempt find the needed data.}
}
\details{
These functions plot the predicted values from a regression model
(\code{lm} or \code{glm}) against one of the predictor variables for
given values of the other predictors. The values of the other
predictors are passed as the \code{...} argument to
\code{Predict.Plot} or are set using gui controls in \code{TkPredict}
(initial values are the medians).
If the variable for the x axis (name put in \code{pred.var}) is not
included with the \code{...} variables, then the range will be
computed from the \code{data} argument or the data component of the
\code{model} argument.
If the variable passed as \code{pred.var} is also included in the
\code{...} arguments and contains a single value, then this value will
be used as the \code{ref.val} argument.
If it contains 2 or more values, then the range of these values will
be used as the x-limits for the predictions.
When running \code{TkPredict} you can click on the "Print Call" button
to print out the call of \code{Predict.Plot} that will recreate the
same plot. Doing this for different combinations of predictor values
and editing the \code{plot.args} and \code{add} arguments will give
you a script that will create a static version of the predictions.
}
\value{
These functions are run for their side effects of creating plots and
do not return anything.
}
\author{Greg Snow, \email{538280@gmail.com}}
\seealso{ \code{tkrplot}, \code{\link{tkexamp}}, \code{\link{predict}} }
\note{ The GUI currently allows you to select a factor as the
x-variable. If you do this it will generate some errors and you will
not see the plot, just choose a different variable as the x-variable
and the plot will return. }
\examples{
library(splines)
fit.lm1 <- lm( Sepal.Width ~ ns(Petal.Width,3)*ns(Petal.Length,3)+Species,
data=iris)
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.3, Species = "versicolor",
plot.args = list(ylim=range(iris$Sepal.Width), col='blue'),
type = "response")
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.3, Species = "virginica",
plot.args = list(col='red'),
type = "response", add=TRUE)
Predict.Plot(fit.lm1, pred.var = "Petal.Width", Petal.Width = 1.22,
Petal.Length = 4.4, Species = "virginica",
plot.args = list(col='purple'),
type = "response", add=TRUE)
fit.glm1 <- glm( Species=='virginica' ~ Sepal.Width+Sepal.Length,
data=iris, family=binomial)
Predict.Plot(fit.glm1, pred.var = "Sepal.Length", Sepal.Width = 1.99,
Sepal.Length = 6.34, plot.args = list(ylim=c(0,1), col='blue'),
type = "response")
Predict.Plot(fit.glm1, pred.var = "Sepal.Length", Sepal.Width = 4.39,
Sepal.Length = 6.34, plot.args = list(col='red'),
type = "response", add=TRUE)
if(interactive()){
TkPredict(fit.lm1)
TkPredict(fit.glm1)
}
}
\keyword{dynamic}% at least one, from doc/KEYWORDS
\keyword{iplot}% __ONLY ONE__ keyword per line
\keyword{regression}
|
#' Check which operating system is running and set the working directory
#' accordingly (Linux home checkout vs. the Windows C:\local_R checkout).
setWorkspace <- function() {
mySystem <- Sys.info()
# Sys.info()[[1]] holds the OS name ("Linux", "Windows", ...)
if (mySystem[[1]] == "Linux") {
setwd("~/R/karliane/projeto_karliane/flexcon_c")
} else {
setwd("C:\\local_R\\projeto_karliane\\flexcon_c")
}
}
# NOTE(review): this driver depends on functions.R / utils.R /
# crossValidation.R for cleanVector, whichDB, getDatabase, appendVectors,
# outputArchive, ... and presumably on plyr (ddply) and rminer (holdout)
# being loaded by those files -- nothing here is self-contained.
# args = commandArgs(trailingOnly=TRUE)
# if ((args == "-h") || (args == "--help")) {
# cat("The arg must be integer between 1-4!\n1 - NaiveBayes\n2 - rpartXse",
# "\n3 - JRip\n4 - IBk")
# } else if ((as.integer(args) == F) || (is.na(as.integer(args))) ||
# (as.integer(args) > 4) || (as.integer(args) < 1)) {
# stop("The arg must be integer between 1-4!\n1 - NaiveBayes\n2 - rpartXse",
# "\n3 - JRip\n4 - IBk")
# } else {
# classifier index hard-coded to 1 (NaiveBayes) instead of the CLI argument
args <- 1
setWorkspace()
source("functions.R")
source("utils.R")
source("crossValidation.R")
initGlobalVariables()
defines()
medias_c1_s <- cleanVector(medias_c1_s)
medias_c1_v <- cleanVector(medias_c1_v)
medias_c2 <- cleanVector(medias_c2)
medias_self <- cleanVector(medias_self)
cl <- as.integer(args)
param <- whichDB(classifiers[cl])
ini_cr <- param$cr
ini_bd <- param$bd
for(i in ini_bd:2) { #Iris
base_original <- getDatabase(i)
k_NN <- attKValue(base_original)
# examples per class, and 10% of the size of the smallest class
qtd_exem_por_classe <- ddply(base_original, ~class, summarise,
distinct_orders = length(class))
qtd_exem_menor_classe <- trunc(min(qtd_exem_por_classe$distinct_orders) * 0.1)
folds <- crossValidation(base_original, base_original$class)
for (cr in 5:5) { #2
for(j in 1:5) { #1
taxa <- j * 5 # labelled percentage: 5, 10, 15, 20, 25
acc_c1_s <- cleanVector(acc_c1_s)
acc_c1_v <- cleanVector(acc_c1_v)
acc_c2 <- cleanVector(acc_c2)
acc_self <- cleanVector(acc_self)
for (fold in 1:length(folds)) {
base_teste <- base_original[folds[[fold]], ]
base <- base_original[- folds[[fold]], ]
treinamento <<- base_rotulada_treino <- base
# draw the examples that will start out labelled (stratified holdout below)
cat("\nBD:", i, " CL:", cl, " CR:", cr, " TX:", j, " FOLD:",
fold)
H2 <- holdout(base_rotulada_treino$class, ratio = (taxa / 100),
mode = "stratified")
ids_treino_rot <- H2$tr
base <- newBase(base_rotulada_treino, ids_treino_rot)
base_rotulados_ini <- base_rotulada_treino[ids_treino_rot, ]
# training.R runs one semi-supervised experiment using the globals set above
source('training.R')
}
# medias_c1_s <- appendVectors(medias_c1_s, acc_c1_s)
# medias_c1_v <- appendVectors(medias_c1_v, acc_c1_v)
# medias_c2 <- appendVectors(medias_c2, acc_c2)
medias_self <- appendVectors(medias_self, acc_self)
}
data_arquivo_o <- data.frame(bd_g_o, tx_g_o, it_g_o, thrConf_g_o,
nr_added_exs_g_o, acertou_g_o)
outputArchive(cr, as.character(classifiers[cl]), medias_c1_s, medias_c1_v,
medias_c2, medias_self)
write.csv(data_arquivo_o, paste(c("resultado", classifiers[cl], "095.csv"),
collapse = "_"), row.names = FALSE)
# medias_c1_s <- cleanVector(medias_c1_s)
# medias_c1_v <- cleanVector(medias_c1_v)
# medias_c2 <- cleanVector(medias_c2)
medias_self <- cleanVector(medias_self)
}
# if(ini_cr != 2) {
# ini_cr = 2
# }
}
# }
| /flexcon_c/main.R | no_license | karlianev/projeto_karliane | R | false | false | 3,403 | r | #' Função chega o SO utilizado e seta o diretório
#' Check which OS is running and set the working directory accordingly.
setWorkspace <- function() {
mySystem <- Sys.info()
# Sys.info()[[1]] holds the OS name ("Linux", "Windows", ...)
if (mySystem[[1]] == "Linux") {
setwd("~/R/karliane/projeto_karliane/flexcon_c")
} else {
setwd("C:\\local_R\\projeto_karliane\\flexcon_c")
}
}
# NOTE(review): this driver depends on functions.R / utils.R /
# crossValidation.R for cleanVector, whichDB, getDatabase, appendVectors,
# outputArchive, ... and presumably on plyr (ddply) and rminer (holdout)
# being loaded by those files -- nothing here is self-contained.
# args = commandArgs(trailingOnly=TRUE)
# if ((args == "-h") || (args == "--help")) {
# cat("The arg must be integer between 1-4!\n1 - NaiveBayes\n2 - rpartXse",
# "\n3 - JRip\n4 - IBk")
# } else if ((as.integer(args) == F) || (is.na(as.integer(args))) ||
# (as.integer(args) > 4) || (as.integer(args) < 1)) {
# stop("The arg must be integer between 1-4!\n1 - NaiveBayes\n2 - rpartXse",
# "\n3 - JRip\n4 - IBk")
# } else {
# classifier index hard-coded to 1 (NaiveBayes) instead of the CLI argument
args <- 1
setWorkspace()
source("functions.R")
source("utils.R")
source("crossValidation.R")
initGlobalVariables()
defines()
medias_c1_s <- cleanVector(medias_c1_s)
medias_c1_v <- cleanVector(medias_c1_v)
medias_c2 <- cleanVector(medias_c2)
medias_self <- cleanVector(medias_self)
cl <- as.integer(args)
param <- whichDB(classifiers[cl])
ini_cr <- param$cr
ini_bd <- param$bd
for(i in ini_bd:2) { #Iris
base_original <- getDatabase(i)
k_NN <- attKValue(base_original)
# examples per class, and 10% of the size of the smallest class
qtd_exem_por_classe <- ddply(base_original, ~class, summarise,
distinct_orders = length(class))
qtd_exem_menor_classe <- trunc(min(qtd_exem_por_classe$distinct_orders) * 0.1)
folds <- crossValidation(base_original, base_original$class)
for (cr in 5:5) { #2
for(j in 1:5) { #1
taxa <- j * 5 # labelled percentage: 5, 10, 15, 20, 25
acc_c1_s <- cleanVector(acc_c1_s)
acc_c1_v <- cleanVector(acc_c1_v)
acc_c2 <- cleanVector(acc_c2)
acc_self <- cleanVector(acc_self)
for (fold in 1:length(folds)) {
base_teste <- base_original[folds[[fold]], ]
base <- base_original[- folds[[fold]], ]
treinamento <<- base_rotulada_treino <- base
# draw the examples that will start out labelled (stratified holdout below)
cat("\nBD:", i, " CL:", cl, " CR:", cr, " TX:", j, " FOLD:",
fold)
H2 <- holdout(base_rotulada_treino$class, ratio = (taxa / 100),
mode = "stratified")
ids_treino_rot <- H2$tr
base <- newBase(base_rotulada_treino, ids_treino_rot)
base_rotulados_ini <- base_rotulada_treino[ids_treino_rot, ]
# training.R runs one semi-supervised experiment using the globals set above
source('training.R')
}
# medias_c1_s <- appendVectors(medias_c1_s, acc_c1_s)
# medias_c1_v <- appendVectors(medias_c1_v, acc_c1_v)
# medias_c2 <- appendVectors(medias_c2, acc_c2)
medias_self <- appendVectors(medias_self, acc_self)
}
data_arquivo_o <- data.frame(bd_g_o, tx_g_o, it_g_o, thrConf_g_o,
nr_added_exs_g_o, acertou_g_o)
outputArchive(cr, as.character(classifiers[cl]), medias_c1_s, medias_c1_v,
medias_c2, medias_self)
write.csv(data_arquivo_o, paste(c("resultado", classifiers[cl], "095.csv"),
collapse = "_"), row.names = FALSE)
# medias_c1_s <- cleanVector(medias_c1_s)
# medias_c1_v <- cleanVector(medias_c1_v)
# medias_c2 <- cleanVector(medias_c2)
medias_self <- cleanVector(medias_self)
}
# if(ini_cr != 2) {
# ini_cr = 2
# }
}
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insane-package.R
\docType{package}
\name{insane-package}
\alias{insane}
\alias{insane-package}
\title{insane: INsulin Secretion ANalysEr}
\description{
A user-friendly interface, using Shiny, to analyse glucose-stimulated insulin secretion (GSIS) assays in pancreatic beta cells or islets. The package allows the user to import several sets of experiments from different spreadsheets and to perform subsequent steps: summarise in a tidy format, visualise data quality and compare experimental conditions without omitting to account for technical confounders such as the date of the experiment or the technician. Together, insane is a comprehensive method that optimises pre-processing and analyses of GSIS experiments in a friendly-user interface. The Shiny App was initially designed for EndoC-betaH1 cell line following method described in Ndiaye et al., 2017 (\doi{10.1016/j.molmet.2017.03.011}).
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/mcanouil/insane/}
\item \url{https://m.canouil.dev/insane/}
\item Report bugs at \url{https://github.com/mcanouil/insane/issues/}
}
}
\author{
\strong{Maintainer}: Mickaël Canouil \email{pro@mickael.canouil.dev} (\href{https://orcid.org/0000-0002-3396-4549}{ORCID})
}
\keyword{internal}
| /man/insane-package.Rd | permissive | mcanouil/insane | R | false | true | 1,340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insane-package.R
\docType{package}
\name{insane-package}
\alias{insane}
\alias{insane-package}
\title{insane: INsulin Secretion ANalysEr}
\description{
A user-friendly interface, using Shiny, to analyse glucose-stimulated insulin secretion (GSIS) assays in pancreatic beta cells or islets. The package allows the user to import several sets of experiments from different spreadsheets and to perform subsequent steps: summarise in a tidy format, visualise data quality and compare experimental conditions without omitting to account for technical confounders such as the date of the experiment or the technician. Together, insane is a comprehensive method that optimises pre-processing and analyses of GSIS experiments in a friendly-user interface. The Shiny App was initially designed for EndoC-betaH1 cell line following method described in Ndiaye et al., 2017 (\doi{10.1016/j.molmet.2017.03.011}).
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/mcanouil/insane/}
\item \url{https://m.canouil.dev/insane/}
\item Report bugs at \url{https://github.com/mcanouil/insane/issues/}
}
}
\author{
\strong{Maintainer}: Mickaël Canouil \email{pro@mickael.canouil.dev} (\href{https://orcid.org/0000-0002-3396-4549}{ORCID})
}
\keyword{internal}
|
## PURPOSE: try 3rd party Addins
## devtools::install_github("rstudio/addinexamples", type = "source")
## position cursor on "function"
## with remap of Alt + 2 applies reshape
foo <- function(a, b, c) {
NULL
}
## position cursor on "list"
## run `addin Reshape Expression`, Alt + 2
list(
a=1,
b=list(2,3)
)
| /rmd/100_examples_rstudio_addins.R | no_license | jimrothstein/try_things_here | R | false | false | 329 | r | ## PURPOSE: try 3rd party Addins
## devtools::install_github("rstudio/addinexamples", type = "source")
## position cursor on "function"
## with remap of Alt + 2 applies reshape
foo <- function(a, b, c) {
NULL
}
## position cursor on "list"
## run `addin Reshape Expression`, Alt + 2
list(
a=1,
b=list(2,3)
)
|
#' Send an email message through the Mailgun API
#' @description Send an email message via
#' the Mailgun API. This requires an account
#' with Mailgun.
#' @param message the email message object,
#' as created by the \code{compose_email()}
#' function. The object's class is
#' \code{email_message}
#' @param subject the subject of the
#' email.
#' @param from the email address of the
#' sender. This does not have to be
#' the same email that is associated with
#' the account actually sending the message.
#' @param recipients a vector of email
#' addresses.
#' @param url the URL for the sending domain.
#' @param api_key the API key registered to
#' the mailgun service.
#' @examples
#' \dontrun{
#' # Create a simple email message using
#' # Markdown formatting
#' email <-
#' compose_email(
#' body = "
#' Hello!
#'
#' ## This a section heading
#'
#' We can use Markdown formatting \\
#' to **embolden** text or to add \\
#' *emphasis*. This is exciting, \\
#' right?
#'
#' Cheers")
#'
#' # Generate a vector of recipients
#' recipient_list <-
#' c("person_1@site.net",
#' "person_2@site.net")
#'
#' # Send it to multiple people through
#' # the Mailgun API
#' email %>%
#' send_by_mailgun(
#' subject = "Sent through Mailgun",
#' from = "The Sender <sender@send.org>",
#' recipients = recipient_list,
#' url = "<..mailgun_sending_domain..>",
#' api = "<..mailgun_api_key..>")
#' }
#' @import httr
#' @export send_by_mailgun
send_by_mailgun <- function(message,
subject = NULL,
from,
recipients,
url,
api_key) {
# Verify that the `message` object
# is of the class `email_message`
if (!inherits(x = message, what = "email_message")) {
stop("The object provided in `message` must be created by the `compose_email()` function.")
}
if (is.null(subject)) {
subject_text <- "<no subject>"
} else {
subject_text <- glue::glue(subject)
}
recipients <- paste(recipients, collapse = ", ")
# Post the message to Mailgun
httr::POST(
url = url,
authenticate("api", api_key),
encode = "form",
body = list(
from = from,
to = recipients,
subject = subject,
html = message$html_html))
}
| /R/send_by_mailgun.R | permissive | vfulco/blastula | R | false | false | 2,342 | r | #' Send an email message through the Mailgun API
#' @description Send an email message via
#' the Mailgun API. This requires an account
#' with Mailgun.
#' @param message the email message object,
#' as created by the \code{compose_email()}
#' function. The object's class is
#' \code{email_message}
#' @param subject the subject of the
#' email.
#' @param from the email address of the
#' sender. This does not have to be
#' the same email that is associated with
#' the account actually sending the message.
#' @param recipients a vector of email
#' addresses.
#' @param url the URL for the sending domain.
#' @param api_key the API key registered to
#' the mailgun service.
#' @examples
#' \dontrun{
#' # Create a simple email message using
#' # Markdown formatting
#' email <-
#' compose_email(
#' body = "
#' Hello!
#'
#' ## This a section heading
#'
#' We can use Markdown formatting \\
#' to **embolden** text or to add \\
#' *emphasis*. This is exciting, \\
#' right?
#'
#' Cheers")
#'
#' # Generate a vector of recipients
#' recipient_list <-
#' c("person_1@site.net",
#' "person_2@site.net")
#'
#' # Send it to multiple people through
#' # the Mailgun API
#' email %>%
#' send_by_mailgun(
#' subject = "Sent through Mailgun",
#' from = "The Sender <sender@send.org>",
#' recipients = recipient_list,
#' url = "<..mailgun_sending_domain..>",
#' api = "<..mailgun_api_key..>")
#' }
#' @import httr
#' @export send_by_mailgun
send_by_mailgun <- function(message,
subject = NULL,
from,
recipients,
url,
api_key) {
# Verify that the `message` object
# is of the class `email_message`
if (!inherits(x = message, what = "email_message")) {
stop("The object provided in `message` must be created by the `compose_email()` function.")
}
if (is.null(subject)) {
subject_text <- "<no subject>"
} else {
subject_text <- glue::glue(subject)
}
recipients <- paste(recipients, collapse = ", ")
# Post the message to Mailgun
httr::POST(
url = url,
authenticate("api", api_key),
encode = "form",
body = list(
from = from,
to = recipients,
subject = subject,
html = message$html_html))
}
|
library(data.table)
library(dplyr)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destfile <- "HAR.zip"
download.file(url, destfile = destfile)
unzip(destfile)
folder <- "UCI HAR Dataset"
# Read feature names, activity labels and
features <- fread(file.path(folder, "features.txt"), col.names = c("seq", "name"))
labels <- fread(file.path(folder, "activity_labels.txt"), col.names = c("cod", "name"))
# Load data sets
XTrain <- fread(file.path(folder, "train", "X_Train.txt"), col.names = features$name)
yTrain <- fread(file.path(folder, "train", "y_Train.txt"))
trainSubjects <- fread(file.path(folder, "train", "subject_train.txt"))
XTest <- fread(file.path(folder, "test", "X_Test.txt"), col.names = features$name)
yTest <- fread(file.path(folder, "test", "y_Test.txt"))
testSubjects <- fread(file.path(folder, "test", "subject_test.txt"))
originalDT <- rbind(XTrain, XTest)
# Filter the variables for mean and standard deviation
cols <- grep("(mean|std)\\(\\)", featDT$name, value = TRUE)
dataset <- originalDT %>% select(cols)
# Add new variables containing the activity and subject
dataset$activity <- labels$name[rbind(yTrain, yTest)$V1]
dataset$subject <- rbind(trainSubjects, testSubjects)
# Calculate average for each activity and subject
avgData <- aggregate(. ~ activity + subject, dataset, mean)
# Save the data
write.table(avgData, "averagedata.txt", row.names = FALSE) | /run_analysis.R | no_license | anisiomqs/Coursera-Getting-and-Cleaning-Data-Course-Project | R | false | false | 1,442 | r | library(data.table)
library(dplyr)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destfile <- "HAR.zip"
download.file(url, destfile = destfile)
unzip(destfile)
folder <- "UCI HAR Dataset"
# Read feature names, activity labels and
features <- fread(file.path(folder, "features.txt"), col.names = c("seq", "name"))
labels <- fread(file.path(folder, "activity_labels.txt"), col.names = c("cod", "name"))
# Load data sets
XTrain <- fread(file.path(folder, "train", "X_Train.txt"), col.names = features$name)
yTrain <- fread(file.path(folder, "train", "y_Train.txt"))
trainSubjects <- fread(file.path(folder, "train", "subject_train.txt"))
XTest <- fread(file.path(folder, "test", "X_Test.txt"), col.names = features$name)
yTest <- fread(file.path(folder, "test", "y_Test.txt"))
testSubjects <- fread(file.path(folder, "test", "subject_test.txt"))
originalDT <- rbind(XTrain, XTest)
# Filter the variables for mean and standard deviation
cols <- grep("(mean|std)\\(\\)", featDT$name, value = TRUE)
dataset <- originalDT %>% select(cols)
# Add new variables containing the activity and subject
dataset$activity <- labels$name[rbind(yTrain, yTest)$V1]
dataset$subject <- rbind(trainSubjects, testSubjects)
# Calculate average for each activity and subject
avgData <- aggregate(. ~ activity + subject, dataset, mean)
# Save the data
write.table(avgData, "averagedata.txt", row.names = FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_add_role_to_instance_profile}
\alias{iam_add_role_to_instance_profile}
\title{Adds the specified IAM role to the specified instance profile}
\usage{
iam_add_role_to_instance_profile(InstanceProfileName, RoleName)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to update.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
\item{RoleName}{[required] The name of the role to add.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
}
\description{
Adds the specified IAM role to the specified instance profile. An
instance profile can contain only one role, and this limit cannot be
increased. You can remove the existing role and then add a different
role to an instance profile. You must then wait for the change to appear
across all of AWS because of \href{https://en.wikipedia.org/wiki/Eventual_consistency}{eventual consistency}. To
force the change, you must \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisassociateIamInstanceProfile.html}{disassociate the instance profile}
and then \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html}{associate the instance profile},
or you can stop your instance and then restart it.
}
\details{
The caller of this API must be granted the \code{PassRole} permission on the
IAM role by a permissions policy.
For more information about roles, go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html}{Working with Roles}.
For more information about instance profiles, go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html}{About Instance Profiles}.
}
\section{Request syntax}{
\preformatted{svc$add_role_to_instance_profile(
InstanceProfileName = "string",
RoleName = "string"
)
}
}
\examples{
\dontrun{
# The following command adds the role named S3Access to the instance
# profile named Webserver:
svc$add_role_to_instance_profile(
InstanceProfileName = "Webserver",
RoleName = "S3Access"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/iam_add_role_to_instance_profile.Rd | permissive | johnnytommy/paws | R | false | true | 2,540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_add_role_to_instance_profile}
\alias{iam_add_role_to_instance_profile}
\title{Adds the specified IAM role to the specified instance profile}
\usage{
iam_add_role_to_instance_profile(InstanceProfileName, RoleName)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to update.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
\item{RoleName}{[required] The name of the role to add.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
}
\description{
Adds the specified IAM role to the specified instance profile. An
instance profile can contain only one role, and this limit cannot be
increased. You can remove the existing role and then add a different
role to an instance profile. You must then wait for the change to appear
across all of AWS because of \href{https://en.wikipedia.org/wiki/Eventual_consistency}{eventual consistency}. To
force the change, you must \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisassociateIamInstanceProfile.html}{disassociate the instance profile}
and then \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html}{associate the instance profile},
or you can stop your instance and then restart it.
}
\details{
The caller of this API must be granted the \code{PassRole} permission on the
IAM role by a permissions policy.
For more information about roles, go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html}{Working with Roles}.
For more information about instance profiles, go to \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html}{About Instance Profiles}.
}
\section{Request syntax}{
\preformatted{svc$add_role_to_instance_profile(
InstanceProfileName = "string",
RoleName = "string"
)
}
}
\examples{
\dontrun{
# The following command adds the role named S3Access to the instance
# profile named Webserver:
svc$add_role_to_instance_profile(
InstanceProfileName = "Webserver",
RoleName = "S3Access"
)
}
}
\keyword{internal}
|
#' Title: Text Organization for Bag of Words
#' Purpose: Learn some basic cleaning functions & term frequency
#' Author: Ted Kwartler
#' email: ehk116@gmail.com
#' License: GPL>=3
#' Date: 2019-4-29
#'
# Set the working directory
setwd("/cloud/project/B_Tuesday/data")
# Libs
library(tm)
library(qdap)
library(wordcloud)
library(RColorBrewer)
# Options & Functions
options(stringsAsFactors = FALSE)
Sys.setlocale('LC_ALL','C')
tryTolower <- function(x){
y = NA
try_error = tryCatch(tolower(x), error = function(e) e)
if (!inherits(try_error, 'error'))
y = tolower(x)
return(y)
}
cleanCorpus<-function(corpus, customStopwords){
corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
corpus <- tm_map(corpus, content_transformer(replace_contraction)) #new: isn't to is not
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, content_transformer(tryTolower))
corpus <- tm_map(corpus, removeWords, customStopwords)
return(corpus)
}
# Create custom stop words
stops <- c(stopwords('english'), 'lol', 'amp', 'chardonnay')
# Bigram token maker
bigramTokens <-function(x){
unlist(lapply(NLP::ngrams(words(x), 2), paste, collapse = " "),
use.names = FALSE)
}
# Data
text <- read.csv('chardonnay.csv', header=TRUE)
# As of tm version 0.7-3 tabular was deprecated
names(text)[1]<-'doc_id'
# Make a volatile corpus
txtCorpus <- VCorpus(DataframeSource(text))
# Preprocess the corpus
txtCorpus <- cleanCorpus(txtCorpus, stops)
# Make bi-gram TDM according to the tokenize control & convert it to matrix
wineTDM <- TermDocumentMatrix(txtCorpus,
control=list(tokenize=bigramTokens))
wineTDMm <- as.matrix(wineTDM)
# See a bi-gram
exampleTweet <- grep('wine country', rownames(wineTDMm))
wineTDMm[(exampleTweet-2):(exampleTweet),870:871]
# Get Row Sums & organize
wineTDMv <- sort(rowSums(wineTDMm), decreasing = TRUE)
wineDF <- data.frame(word = names(wineTDMv), freq = wineTDMv)
# Review all Palettes
display.brewer.all()
# Choose a color & drop light ones
pal <- brewer.pal(8, "Purples")
pal <- pal[-(1:2)]
# Make simple word cloud
# Reminder to expand device pane
set.seed(1234)
wordcloud(wineDF$word,
wineDF$freq,
max.words = 50,
random.order = FALSE,
colors = pal,
scale = c(2,1))
# End | /B_Tuesday/scripts/C_SimpleWordCloud.R | permissive | rpplayground/GSERM_TextMining | R | false | false | 2,445 | r | #' Title: Text Organization for Bag of Words
#' Purpose: Learn some basic cleaning functions & term frequency
#' Author: Ted Kwartler
#' email: ehk116@gmail.com
#' License: GPL>=3
#' Date: 2019-4-29
#'
# Set the working directory
setwd("/cloud/project/B_Tuesday/data")
# Libs
library(tm)
library(qdap)
library(wordcloud)
library(RColorBrewer)
# Options & Functions
options(stringsAsFactors = FALSE)
Sys.setlocale('LC_ALL','C')
tryTolower <- function(x){
y = NA
try_error = tryCatch(tolower(x), error = function(e) e)
if (!inherits(try_error, 'error'))
y = tolower(x)
return(y)
}
cleanCorpus<-function(corpus, customStopwords){
corpus <- tm_map(corpus, content_transformer(qdapRegex::rm_url))
corpus <- tm_map(corpus, content_transformer(replace_contraction)) #new: isn't to is not
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, content_transformer(tryTolower))
corpus <- tm_map(corpus, removeWords, customStopwords)
return(corpus)
}
# Create custom stop words
stops <- c(stopwords('english'), 'lol', 'amp', 'chardonnay')
# Bigram token maker
bigramTokens <-function(x){
unlist(lapply(NLP::ngrams(words(x), 2), paste, collapse = " "),
use.names = FALSE)
}
# Data
text <- read.csv('chardonnay.csv', header=TRUE)
# As of tm version 0.7-3 tabular was deprecated
names(text)[1]<-'doc_id'
# Make a volatile corpus
txtCorpus <- VCorpus(DataframeSource(text))
# Preprocess the corpus
txtCorpus <- cleanCorpus(txtCorpus, stops)
# Make bi-gram TDM according to the tokenize control & convert it to matrix
wineTDM <- TermDocumentMatrix(txtCorpus,
control=list(tokenize=bigramTokens))
wineTDMm <- as.matrix(wineTDM)
# See a bi-gram
exampleTweet <- grep('wine country', rownames(wineTDMm))
wineTDMm[(exampleTweet-2):(exampleTweet),870:871]
# Get Row Sums & organize
wineTDMv <- sort(rowSums(wineTDMm), decreasing = TRUE)
wineDF <- data.frame(word = names(wineTDMv), freq = wineTDMv)
# Review all Palettes
display.brewer.all()
# Choose a color & drop light ones
pal <- brewer.pal(8, "Purples")
pal <- pal[-(1:2)]
# Make simple word cloud
# Reminder to expand device pane
set.seed(1234)
wordcloud(wineDF$word,
wineDF$freq,
max.words = 50,
random.order = FALSE,
colors = pal,
scale = c(2,1))
# End |
library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=TRUE)
sink('./Model/EN/ReliefF/breast/breast_021.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/breast/breast_021.R | no_license | leon1003/QSMART | R | false | false | 352 | r | library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=TRUE)
sink('./Model/EN/ReliefF/breast/breast_021.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## In this assignment we analyze data on fine particulate matter (PM2.5)
## and develop a set of graphs in png format displaying
## The data was provided as part of the Coursera Course Exploratory
## Data Analaysis from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
## which was originally downloaded on May 9, 2014.
## The data was collected from the EPA for the years 1999, 2002, 2005 and 2008.
## See README.md in the repository for further information and references about the
## data and background.
library(ggplot2)
library(utils)
library(plyr)
# Read the data
datafileName <- "summarySCC_PM25.rds"
classfileName <- "Source_Classification_Code.rds"
# See if we have the main data file and if not, download them and uzip
if (! file.exists(datafileName)) {
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
destfile <- "exdata-data-NEI_data.zip"
# only download if the zip file not in local directory
if (! file.exists(destfile)) download.file(url, destfile = destfile, mode = "wb")
unzip(destfile)
}
# Read the two data files. NEI has the data and SCC has infomration about the SCC code
# for each observation.
NEI <- readRDS(datafileName)
SCC <- readRDS(classfileName)
# We will remove "." and "_" in SCC names to keep it tidy
names(SCC) <- gsub("[._]","",names(SCC))
# Make many columns factors since we don't need to process them as strings or numeric
colsToConvert <- names(NEI) != "Emissions"
NEI[ ,colsToConvert] <- lapply(NEI[ ,colsToConvert], factor)
# Basic Stats
dim(NEI)
dim(SCC)
summary(NEI)
unique(NEI$year)
# See if we have any missing data
sum(is.na(NEI))
# See if we have any emissions < 0 which would be inaccurate
sum((NEI$Emissions<0))
# Plot 2
BCNEI <- subset(NEI, fips == "24510")
BCTotalEmissions <- ddply(BCNEI, "year", summarize, Total_Emissions=sum(Emissions))
# Build an empty plot and then add attributes to it
with(BCTotalEmissions, {
bp <- barplot(names.arg=year, Total_Emissions,
main = bquote("Baltimore City Total PM"[2.5]~"Emissions by Year"),
ylab = bquote("PM"[2.5]~"emissions (tons)"),
xlab = "Year",
ylim = c(0, max(Total_Emissions)*1.05)) # simple scale to look better
# Note bp now holds the centers of each bar chart x axis
lines(bp, Total_Emissions, col = "blue", lwd = 2)
points(bp, Total_Emissions, col = "blue", pch = "X", lwd =2)
})
| /Plot2.R | no_license | Rashmi-Sinha/ExData_Plotting2 | R | false | false | 2,427 | r |
## In this assignment we analyze data on fine particulate matter (PM2.5)
## and develop a set of graphs in png format displaying
## The data was provided as part of the Coursera Course Exploratory
## Data Analaysis from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
## which was originally downloaded on May 9, 2014.
## The data was collected from the EPA for the years 1999, 2002, 2005 and 2008.
## See README.md in the repository for further information and references about the
## data and background.
library(ggplot2)
library(utils)
library(plyr)
# Read the data
datafileName <- "summarySCC_PM25.rds"
classfileName <- "Source_Classification_Code.rds"
# See if we have the main data file and if not, download them and uzip
if (! file.exists(datafileName)) {
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
destfile <- "exdata-data-NEI_data.zip"
# only download if the zip file not in local directory
if (! file.exists(destfile)) download.file(url, destfile = destfile, mode = "wb")
unzip(destfile)
}
# Read the two data files. NEI has the data and SCC has infomration about the SCC code
# for each observation.
NEI <- readRDS(datafileName)
SCC <- readRDS(classfileName)
# We will remove "." and "_" in SCC names to keep it tidy
names(SCC) <- gsub("[._]","",names(SCC))
# Make many columns factors since we don't need to process them as strings or numeric
colsToConvert <- names(NEI) != "Emissions"
NEI[ ,colsToConvert] <- lapply(NEI[ ,colsToConvert], factor)
# Basic Stats
dim(NEI)
dim(SCC)
summary(NEI)
unique(NEI$year)
# See if we have any missing data
sum(is.na(NEI))
# See if we have any emissions < 0 which would be inaccurate
sum((NEI$Emissions<0))
# Plot 2
BCNEI <- subset(NEI, fips == "24510")
BCTotalEmissions <- ddply(BCNEI, "year", summarize, Total_Emissions=sum(Emissions))
# Build an empty plot and then add attributes to it
with(BCTotalEmissions, {
bp <- barplot(names.arg=year, Total_Emissions,
main = bquote("Baltimore City Total PM"[2.5]~"Emissions by Year"),
ylab = bquote("PM"[2.5]~"emissions (tons)"),
xlab = "Year",
ylim = c(0, max(Total_Emissions)*1.05)) # simple scale to look better
# Note bp now holds the centers of each bar chart x axis
lines(bp, Total_Emissions, col = "blue", lwd = 2)
points(bp, Total_Emissions, col = "blue", pch = "X", lwd =2)
})
|
## server.R file for the shiny app pertaining to Coursera Project "Developing Data Products"
# Load libraries
library(shiny)
library(datasets)
library(dplyr)
# Main code - shiny server function
shinyServer(function(input, output) {
# Displays cars that correspond to the filters
output$table <- renderDataTable({
disp_seq <- seq(from = input$disp[1], to = input$disp[2], by = 0.1)
hp_seq <- seq(from = input$hp[1], to = input$hp[2], by = 1)
data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
GasolineExpenditure = input$dis/mpg*input$cost,
Cylinders = cyl, Displacement = disp, Horsepower = hp,
Transmission = am)
data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
Displacement %in% disp_seq, Horsepower %in% hp_seq, Transmission %in% input$am)
data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
data <- arrange(data, GasolineExpenditure)
data
},
options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
})
| /server.R | no_license | vishanta/Developing_Data_Products | R | false | false | 1,172 | r | ## server.R file for the shiny app pertaining to Coursera Project "Developing Data Products"
# Load libraries
library(shiny)
library(datasets)
library(dplyr)
# Main code - shiny server function
shinyServer(function(input, output) {
# Displays cars that correspond to the filters
output$table <- renderDataTable({
disp_seq <- seq(from = input$disp[1], to = input$disp[2], by = 0.1)
hp_seq <- seq(from = input$hp[1], to = input$hp[2], by = 1)
data <- transmute(mtcars, Car = rownames(mtcars), MilesPerGallon = mpg,
GasolineExpenditure = input$dis/mpg*input$cost,
Cylinders = cyl, Displacement = disp, Horsepower = hp,
Transmission = am)
data <- filter(data, GasolineExpenditure <= input$gas, Cylinders %in% input$cyl,
Displacement %in% disp_seq, Horsepower %in% hp_seq, Transmission %in% input$am)
data <- mutate(data, Transmission = ifelse(Transmission==0, "Automatic", "Manual"))
data <- arrange(data, GasolineExpenditure)
data
},
options = list(lengthMenu = c(5, 15, 30), pageLength = 30))
})
|
# Calculate alpha diversity within different radii for BBS.
# in contrast to FIA, we can only use groups within the same year. 1997-present.
# Alpha diversity for all together since it is not necessary to do parallel
# In another script, load this output and take the averages by the different route subsets.
load('/mnt/research/nasabio/data/bbs/bbsworkspace_bystop_20072016.r')
source('/mnt/research/nasabio/code/pairwise_beta_focal.r')
load('/mnt/research/nasabio/data/bbs/bbspdfddist.r') # Phy and Func distance matrices.
source('/mnt/research/nasabio/code/fixpicante.r')
library(sp)
library(vegan)
nnull <- 999
# Get the alpha diversity for each stop
alpha_div <- diversity_3ways(m = bbsmat_oneyear, flavor = 'alpha',
dotd=T, dopd=T, dofd=T, abundance=F,
pddist = ericdist, fddist = birdtraitdist,
nnull = nnull,
phylo_spp = NULL, func_problem_spp = NULL, combine = FALSE)
bbs_alphadiv <- cbind(bbscov_oneyear, alpha_div)
write.csv(bbs_alphadiv, file = '/mnt/research/nasabio/data/bbs/biodiversity_CSVs/withinroute/bbs_withinroute_alphabystop.csv', row.names = FALSE)
| /run_compile_diversity/bbswithinroute/bbs_allstopsalpha.r | no_license | qdread/nasabio | R | false | false | 1,196 | r | # Calculate alpha diversity within different radii for BBS.
# in contrast to FIA, we can only use groups within the same year. 1997-present.
# Alpha diversity for all together since it is not necessary to do parallel
# In another script, load this output and take the averages by the different route subsets.
load('/mnt/research/nasabio/data/bbs/bbsworkspace_bystop_20072016.r')
source('/mnt/research/nasabio/code/pairwise_beta_focal.r')
load('/mnt/research/nasabio/data/bbs/bbspdfddist.r') # Phy and Func distance matrices.
source('/mnt/research/nasabio/code/fixpicante.r')
library(sp)
library(vegan)
nnull <- 999
# Get the alpha diversity for each stop
alpha_div <- diversity_3ways(m = bbsmat_oneyear, flavor = 'alpha',
dotd=T, dopd=T, dofd=T, abundance=F,
pddist = ericdist, fddist = birdtraitdist,
nnull = nnull,
phylo_spp = NULL, func_problem_spp = NULL, combine = FALSE)
bbs_alphadiv <- cbind(bbscov_oneyear, alpha_div)
write.csv(bbs_alphadiv, file = '/mnt/research/nasabio/data/bbs/biodiversity_CSVs/withinroute/bbs_withinroute_alphabystop.csv', row.names = FALSE)
|
## Horror movie data 2019-10-22
## https://github.com/rfordatascience/tidytuesday/tree/master/data/2019/2019-10-22
library(dplyr)
library(egg)
library(ggplot2)
library(ggimage)
library(lubridate)
library(readr)
library(stringr)
movies <- read_csv("horror_movies.csv")
## Parse dates
movies$release_parsed <- parse_date(movies$release_date,
format = "%d-%b-%y",
locale = locale("en"))
movies$release_year <- ifelse(is.na(movies$release_parsed),
movies$release_date,
year(movies$release_parsed))
movies$release_month <- month.abb[month(movies$release_parsed)]
## Parse budget
movies$budget_currency <- str_match(movies$budget,
"^[^0-9]+")[,1]
movies$budget_numeric <- as.numeric(gsub(movies$budget, pattern = "[^0-9]",
replacement = ""))
## Count films per year
count <- as.data.frame(table(movies$release_month))
colnames(count) <- c("release_month", "number")
## Linear model of average rating
model <- lm(review_rating ~ release_month, movies)
fit <- data.frame(release_month = month.abb,
predict(model,
newdata = data.frame(release_month = month.abb),
interval = "confidence"),
stringsAsFactors = FALSE)
grand_mean_rating <- mean(movies$review_rating,
na.rm = TRUE)
## Linear model of log budget
model_budget <- lm(log10(budget_numeric) ~ release_month, movies)
fit_budget <- data.frame(release_month = month.abb,
predict(model_budget,
newdata = data.frame(release_month = month.abb),
interval = "confidence"),
stringsAsFactors = FALSE)
## Plot of month
plot_number <- ggplot() +
geom_bar(aes(x = release_month,
y = number),
data = count,
stat = "identity",
fill = "grey") +
scale_x_discrete(limits = month.abb) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("") +
ylab("Number of films released")
plot_rating <- ggplot() +
geom_violin(aes(x = release_month,
y = review_rating),
fill = "grey",
colour = NA,
data = movies) +
scale_x_discrete(limits = month.abb) +
geom_pointrange(aes(x = release_month,
y = fit,
ymax = upr,
ymin = lwr),
data = fit) +
geom_hline(yintercept = grand_mean_rating,
linetype = 2,
colour = "red") +
ylim(0, 10) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("") +
ylab("Review rating")
plot_budget <- ggplot() +
geom_violin(aes(x = release_month,
y = log10(budget_numeric)),
data = filter(movies,
budget_currency == "$"),
fill = "grey",
colour = NA) +
geom_pointrange(aes(x = release_month,
y = fit,
ymax = upr,
ymin = lwr),
data = fit_budget) +
scale_x_discrete(limits = month.abb) +
scale_y_continuous(breaks = c(4, 6, 8),
labels = c(0.01, 1, 100)) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("Release month") +
ylab("Million dollars budget")
plot_combined <- ggarrange(plot_number,
plot_rating,
plot_budget,
top = "September and October are the horror film months")
pdf("movies.pdf")
print(plot_combined)
dev.off()
## Plot of day
movies$yday <- yday(movies$release_parsed)
daycount <- summarise(group_by(movies, yday, release_year), n = n())
halloween <- yday("2019-10-31")
pumpkin_data <- data.frame(x = halloween,
y = -1,
image = "pumpkin.png", stringsAsFactors = FALSE)
breaks <- yday(paste("2019-", 1:12, "-01", sep = ""))
## Per-day release counts, faceted by year, with a pumpkin marking Halloween.
## Fix: the original passed undefined object `dc` to na.exclude(); the per-day
## counts computed above are stored in `daycount`.
plot_year <- ggplot() +
    geom_point(aes(x = yday,
                   y = n),
               colour = "green",
               data = na.exclude(daycount)) +
    geom_image(aes(x = x,
                   y = y,
                   image = image),
               data = pumpkin_data) +
    facet_wrap(~ release_year,
               ncol = 2) +
    scale_x_continuous(breaks = breaks,
                       labels = month.abb) +
    ylim(-3, NA) +
    ## Fix: icon attribution site is flaticon.com (not flatiron.com)
    labs(caption = "Pumpkin icon by Good Ware from www.flaticon.com.") +
    theme(panel.grid = element_blank(),
          strip.background = element_blank(),
          text = element_text(family = "mono",
                              colour = "grey",
                              size = 16),
          axis.text = element_text(family = "mono",
                                   colour = "green",
                                   size = 14),
          axis.ticks = element_line(colour = "green"),
          strip.text = element_text(family = "mono",
                                    colour = "grey",
                                    size = 16),
          plot.background = element_rect(fill = "black"),
          panel.background = element_rect(fill = "black")) +
    xlab("") +
    ylab("Horror films released on this day") +
    ggtitle("When horror films are released")
pdf("movies_year.pdf", width = 14)
print(plot_year)
dev.off()
| /tidytuesday/horror_movies.R | no_license | mrtnj/rstuff | R | false | false | 5,735 | r |
## Horror movie data 2019-10-22
## https://github.com/rfordatascience/tidytuesday/tree/master/data/2019/2019-10-22
library(dplyr)
library(egg)
library(ggplot2)
library(ggimage)
library(lubridate)
library(readr)
library(stringr)
movies <- read_csv("horror_movies.csv")
## Parse dates
movies$release_parsed <- parse_date(movies$release_date,
format = "%d-%b-%y",
locale = locale("en"))
movies$release_year <- ifelse(is.na(movies$release_parsed),
movies$release_date,
year(movies$release_parsed))
movies$release_month <- month.abb[month(movies$release_parsed)]
## Parse budget
movies$budget_currency <- str_match(movies$budget,
"^[^0-9]+")[,1]
movies$budget_numeric <- as.numeric(gsub(movies$budget, pattern = "[^0-9]",
replacement = ""))
## Count films per year
count <- as.data.frame(table(movies$release_month))
colnames(count) <- c("release_month", "number")
## Linear model of average rating
model <- lm(review_rating ~ release_month, movies)
fit <- data.frame(release_month = month.abb,
predict(model,
newdata = data.frame(release_month = month.abb),
interval = "confidence"),
stringsAsFactors = FALSE)
grand_mean_rating <- mean(movies$review_rating,
na.rm = TRUE)
## Linear model of log budget
model_budget <- lm(log10(budget_numeric) ~ release_month, movies)
fit_budget <- data.frame(release_month = month.abb,
predict(model_budget,
newdata = data.frame(release_month = month.abb),
interval = "confidence"),
stringsAsFactors = FALSE)
## Plot of month
plot_number <- ggplot() +
geom_bar(aes(x = release_month,
y = number),
data = count,
stat = "identity",
fill = "grey") +
scale_x_discrete(limits = month.abb) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("") +
ylab("Number of films released")
plot_rating <- ggplot() +
geom_violin(aes(x = release_month,
y = review_rating),
fill = "grey",
colour = NA,
data = movies) +
scale_x_discrete(limits = month.abb) +
geom_pointrange(aes(x = release_month,
y = fit,
ymax = upr,
ymin = lwr),
data = fit) +
geom_hline(yintercept = grand_mean_rating,
linetype = 2,
colour = "red") +
ylim(0, 10) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("") +
ylab("Review rating")
plot_budget <- ggplot() +
geom_violin(aes(x = release_month,
y = log10(budget_numeric)),
data = filter(movies,
budget_currency == "$"),
fill = "grey",
colour = NA) +
geom_pointrange(aes(x = release_month,
y = fit,
ymax = upr,
ymin = lwr),
data = fit_budget) +
scale_x_discrete(limits = month.abb) +
scale_y_continuous(breaks = c(4, 6, 8),
labels = c(0.01, 1, 100)) +
theme_bw(base_size = 12) +
theme(panel.grid = element_blank()) +
xlab("Release month") +
ylab("Million dollars budget")
plot_combined <- ggarrange(plot_number,
plot_rating,
plot_budget,
top = "September and October are the horror film months")
pdf("movies.pdf")
print(plot_combined)
dev.off()
## Plot of day
movies$yday <- yday(movies$release_parsed)
daycount <- summarise(group_by(movies, yday, release_year), n = n())
halloween <- yday("2019-10-31")
pumpkin_data <- data.frame(x = halloween,
y = -1,
image = "pumpkin.png", stringsAsFactors = FALSE)
breaks <- yday(paste("2019-", 1:12, "-01", sep = ""))
## Per-day release counts, faceted by year, with a pumpkin marking Halloween.
## Fix: the original passed undefined object `dc` to na.exclude(); the per-day
## counts computed above are stored in `daycount`.
plot_year <- ggplot() +
    geom_point(aes(x = yday,
                   y = n),
               colour = "green",
               data = na.exclude(daycount)) +
    geom_image(aes(x = x,
                   y = y,
                   image = image),
               data = pumpkin_data) +
    facet_wrap(~ release_year,
               ncol = 2) +
    scale_x_continuous(breaks = breaks,
                       labels = month.abb) +
    ylim(-3, NA) +
    ## Fix: icon attribution site is flaticon.com (not flatiron.com)
    labs(caption = "Pumpkin icon by Good Ware from www.flaticon.com.") +
    theme(panel.grid = element_blank(),
          strip.background = element_blank(),
          text = element_text(family = "mono",
                              colour = "grey",
                              size = 16),
          axis.text = element_text(family = "mono",
                                   colour = "green",
                                   size = 14),
          axis.ticks = element_line(colour = "green"),
          strip.text = element_text(family = "mono",
                                    colour = "grey",
                                    size = 16),
          plot.background = element_rect(fill = "black"),
          panel.background = element_rect(fill = "black")) +
    xlab("") +
    ylab("Horror films released on this day") +
    ggtitle("When horror films are released")
pdf("movies_year.pdf", width = 14)
print(plot_year)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeSeriesExperiment-methods.R
\docType{methods}
\name{timeSeries}
\alias{timeSeries}
\alias{timeSeries,TimeSeriesExperiment-method}
\alias{timeSeries<-}
\alias{timeSeries<-,TimeSeriesExperiment-method}
\title{Time series formatted data.}
\usage{
timeSeries(object, ...)
\S4method{timeSeries}{TimeSeriesExperiment}(object, name = NULL)
timeSeries(object, ...) <- value
\S4method{timeSeries}{TimeSeriesExperiment}(object) <- value
}
\arguments{
\item{object}{a \code{TimeSeriesExperiment} object.}
\item{...}{arguments to other functions.}
\item{name}{a character string, one of 'ts', 'ts_with_lags', 'ts_collapsed'
and 'ts_collapsed_with_lags'. If NULL, all elements are returned.}
\item{value}{replacement list}
}
\value{
a \code{data.frame}
}
\description{
Getter and setter methods for \code{timeSeries} slot of
a \code{TimeSeriesExperiment} object.
}
\details{
\code{timeSeries} slot is a list with 'ts' and (optionally)
'ts_collapsed' storing data formatted as time-series/time-courses.
}
\examples{
data("endoderm_small")
endoderm_small <- makeTimeSeries(endoderm_small)
head(timeSeries(endoderm_small))
head(timeSeries(endoderm_small, name = 'ts'))
}
| /man/timeSeries.Rd | permissive | nlhuong/TimeSeriesExperiment | R | false | true | 1,244 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeSeriesExperiment-methods.R
\docType{methods}
\name{timeSeries}
\alias{timeSeries}
\alias{timeSeries,TimeSeriesExperiment-method}
\alias{timeSeries<-}
\alias{timeSeries<-,TimeSeriesExperiment-method}
\title{Time series formatted data.}
\usage{
timeSeries(object, ...)
\S4method{timeSeries}{TimeSeriesExperiment}(object, name = NULL)
timeSeries(object, ...) <- value
\S4method{timeSeries}{TimeSeriesExperiment}(object) <- value
}
\arguments{
\item{object}{a \code{TimeSeriesExperiment} object.}
\item{...}{arguments to other functions.}
\item{name}{a character string, one of 'ts', 'ts_with_lags', 'ts_collapsed'
and 'ts_collapsed_with_lags'. If NULL, all elements are returned.}
\item{value}{replacement list}
}
\value{
a \code{data.frame}
}
\description{
Getter and setter methods for \code{timeSeries} slot of
a \code{TimeSeriesExperiment} object.
}
\details{
\code{timeSeries} slot is a list with 'ts' and (optionally)
'ts_collapsed' storing data formatted as time-series/time-courses.
}
\examples{
data("endoderm_small")
endoderm_small <- makeTimeSeries(endoderm_small)
head(timeSeries(endoderm_small))
head(timeSeries(endoderm_small, name = 'ts'))
}
|
#' Save and combine (gather) results from batched R sessions
#'
#' `bsave()` is run from R; its companion function is to be run through the command line via `gather`, which is located in `Dropbox/bin`.
#'
#' @param res Results to save
#' @param suffix By default, files are named 'tmp1.rds', 'tmp2.rds', and so on. If `suffix='a'`, then files will be named 'tmp1-a.rds', 'tmp2-a.rds', etc. `gather` will then save results to '2018-11-07-a.rds'.
#'
#' @examples
#' # These examples are illustrative; for working examples, see Dropbox/bin/test
#'
#' A <- rnorm(100)
#'
#' # If run interactively
#' # bsave(A) # Saves to 2019-06-14.rds
#' # bsave(A, "A") # Saves to 2019-06-14-A.rds
#'
#' # If run in batch (with a number as first command line argument)
#' # bsave(A) # Saves to tmp1.rds; gathered to 2019-06-14.rds
#' # bsave(A, "A") # Saves to tmp1-A.rds; gathered to 2019-06-14-A.rds
#'
#' @export
bsave <- function(res, suffix = "") {
  # Prefix the optional suffix with "-" so file names read e.g. "tmp1-A.rds"
  tag <- if (nchar(suffix) > 0) paste0("-", suffix) else suffix
  # A numeric first command-line argument marks a batch run
  batch_id <- as.numeric(commandArgs(TRUE))
  # Batch runs write numbered temp files (gathered later); interactive runs
  # write directly to a date-stamped file.
  out_file <- if (length(batch_id)) {
    paste0("tmp", batch_id[1], tag, ".rds")
  } else {
    paste0(Sys.Date(), tag, ".rds")
  }
  saveRDS(res, file = out_file)
}
| /R/save-gather.r | no_license | pbreheny/breheny | R | false | false | 1,229 | r | #' Save and combine (gather) results from batched R sessions
#'
#' `bsave()` is run from R; its companion function is to be run through the command line via `gather`, which is located in `Dropbox/bin`.
#'
#' @param res Results to save
#' @param suffix By default, files are named 'tmp1.rds', 'tmp2.rds', and so on. If `suffix='a'`, then files will be named 'tmp1-a.rds', 'tmp2-a.rds', etc. `gather` will then save results to '2018-11-07-a.rds'.
#'
#' @examples
#' # These examples are illustrative; for working examples, see Dropbox/bin/test
#'
#' A <- rnorm(100)
#'
#' # If run interactively
#' # bsave(A) # Saves to 2019-06-14.rds
#' # bsave(A, "A") # Saves to 2019-06-14-A.rds
#'
#' # If run in batch (with a number as first command line argument)
#' # bsave(A) # Saves to tmp1.rds; gathered to 2019-06-14.rds
#' # bsave(A, "A") # Saves to tmp1-A.rds; gathered to 2019-06-14-A.rds
#'
#' @export
bsave <- function(res, suffix="") {
Suffix <- if (nchar(suffix) > 0) paste("-",suffix,sep="") else suffix
NCA <- as.numeric(commandArgs(TRUE))
if (length(NCA)) {
saveRDS(res, file=paste0("tmp", NCA[1], Suffix, ".rds"))
} else {
saveRDS(res, file=paste0(Sys.Date(), Suffix,".rds"))
}
}
|
library(mipfp)
### Name: gof.estimates
### Title: Wald, Log-likelihood ratio and Person Chi-square statistics for
### mipfp object
### Aliases: gof.estimates gof.estimates.default gof.estimates.mipfp
### Keywords: multivariate htest
### ** Examples
# loading the data
data(spnamur, package = "mipfp")
# subsetting the data frame, keeping only the first 3 variables
spnamur.sub <- subset(spnamur, select = Household.type:Prof.status)
# true table
true.table <- table(spnamur.sub)
# extracting the margins
tgt.v1 <- apply(true.table, 1, sum)
tgt.v1.v2 <- apply(true.table, c(1,2), sum)
tgt.v2.v3 <- apply(true.table, c(2,3), sum)
tgt.list.dims <- list(1, c(1,2), c(2,3))
tgt.data <- list(tgt.v1, tgt.v1.v2, tgt.v2.v3)
# creating the seed, a 10 pct sample of spnamur
seed.df <- spnamur.sub[sample(nrow(spnamur), round(0.10*nrow(spnamur))), ]
seed.table <- table(seed.df)
# applying one fitting method (ipfp)
r.ipfp <- Estimate(seed=seed.table, target.list=tgt.list.dims,
target.data = tgt.data)
# printing the G2, X2 and W2 statistics
print(gof.estimates(r.ipfp))
# alternative way (pretty printing, with p-values)
print(summary(r.ipfp)$stats.gof)
| /data/genthat_extracted_code/mipfp/examples/gof.estimates.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,194 | r | library(mipfp)
### Name: gof.estimates
### Title: Wald, Log-likelihood ratio and Person Chi-square statistics for
### mipfp object
### Aliases: gof.estimates gof.estimates.default gof.estimates.mipfp
### Keywords: multivariate htest
### ** Examples
# loading the data
data(spnamur, package = "mipfp")
# subsetting the data frame, keeping only the first 3 variables
spnamur.sub <- subset(spnamur, select = Household.type:Prof.status)
# true table
true.table <- table(spnamur.sub)
# extracting the margins
tgt.v1 <- apply(true.table, 1, sum)
tgt.v1.v2 <- apply(true.table, c(1,2), sum)
tgt.v2.v3 <- apply(true.table, c(2,3), sum)
tgt.list.dims <- list(1, c(1,2), c(2,3))
tgt.data <- list(tgt.v1, tgt.v1.v2, tgt.v2.v3)
# creating the seed, a 10 pct sample of spnamur
seed.df <- spnamur.sub[sample(nrow(spnamur), round(0.10*nrow(spnamur))), ]
seed.table <- table(seed.df)
# applying one fitting method (ipfp)
r.ipfp <- Estimate(seed=seed.table, target.list=tgt.list.dims,
target.data = tgt.data)
# printing the G2, X2 and W2 statistics
print(gof.estimates(r.ipfp))
# alternative way (pretty printing, with p-values)
print(summary(r.ipfp)$stats.gof)
|
\name{utild1filt}
\alias{utild1filt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Gaussian Filter for Vectors
}
\description{
The function is applied to a pair of vectors as a lowpass gaussian filter to clean them from high frequency components
}
\usage{
utild1filt(arr0, arr1, outsize = 2, strong = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{arr0}{
vector to be filtered
}
\item{arr1}{
vector to be filtered
}
\item{outsize}{
radius of gaussian filter kernel
}
\item{strong}{
multiplication factor defining the sigma of gaussian filter kernel as \code{sigma=outsize*strong}
}
}
\value{
\item{filt0 }{result of arr0 filtration}
\item{filt1 }{result of arr1 filtration}
}
\author{
Vitaly Efremov <vitaly.efremov@dcu.ie>
}
\seealso{
\code{\link{utild2filt}}
}
\examples{
s1<-sin(c(1:128)*2)+2-c(1:128)*4/128
a<-utild1filt(arr0=s1, arr1=s1)
plot(s1, type='l')
lines(a$filt1, col='red')
s0<-c(rep(0,15), rep(2,12), rep(-2,12), rep(0,25))
s1<-c(rep(0,45), rep(2,4), rep(-2,4), rep(0,25))
a<-utild1filt(s0, s1, outsize=7, strong=.5)
plot(s0, type='l', ylab='s0, s1, a$filt0, a$filt1')
lines(s1, col='gray')
lines(a$filt0, col='red')
lines(a$filt1, col='purple')
}
| /man/utild1filt.Rd | no_license | cran/stheoreme | R | false | false | 1,238 | rd | \name{utild1filt}
\alias{utild1filt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Gaussian Filter for Vectors
}
\description{
The function is applied to a pair of vectors as a lowpass gaussian filter to clean them from high frequency components
}
\usage{
utild1filt(arr0, arr1, outsize = 2, strong = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{arr0}{
vector to be filtered
}
\item{arr1}{
vector to be filtered
}
\item{outsize}{
radius of gaussian filter kernel
}
\item{strong}{
multiplication factor defining the sigma of gaussian filter kernel as \code{sigma=outsize*strong}
}
}
\value{
\item{filt0 }{result of arr0 filtration}
\item{filt1 }{result of arr1 filtration}
}
\author{
Vitaly Efremov <vitaly.efremov@dcu.ie>
}
\seealso{
\code{\link{utild2filt}}
}
\examples{
s1<-sin(c(1:128)*2)+2-c(1:128)*4/128
a<-utild1filt(arr0=s1, arr1=s1)
plot(s1, type='l')
lines(a$filt1, col='red')
s0<-c(rep(0,15), rep(2,12), rep(-2,12), rep(0,25))
s1<-c(rep(0,45), rep(2,4), rep(-2,4), rep(0,25))
a<-utild1filt(s0, s1, outsize=7, strong=.5)
plot(s0, type='l', ylab='s0, s1, a$filt0, a$filt1')
lines(s1, col='gray')
lines(a$filt0, col='red')
lines(a$filt1, col='purple')
}
|
## read just the data for 2007-02-01 and 2007-02-02
## starts on row 66638, includes 2880 rows
## include one extra row for this chart to get Saturday cut-off on the plots
febData <- read.table("household_power_consumption.txt", header = FALSE, na.strings = "?", sep = ";", skip = 66637, nrows = 2881)
## read and assign the column names
colnms <- read.table("household_power_consumption.txt", sep = ";", nrows = 1)
names(febData) <- unlist(colnms)
## convert Date column
febData$Date <- as.Date(febData$Date, "%d/%m/%Y")
## Add new column with date/time properly formatted
## Fix: the original used dplyr::mutate() without loading dplyr, which would
## fail; plain base R assignment produces the same column.
febData$formatted <- as.POSIXct(paste(febData$Date, febData$Time))
## include 4 plots on this device, fill by row
par(mfrow=c(2,2))
##-------------- #1
## plot global active power against new date/time column
plot(range(febData$formatted), range(febData$Global_active_power), type="n", ylab = "Global Active Power (kilowatts)", xlab="")
lines(febData$formatted, febData$Global_active_power)
##-------------- #2
plot(range(febData$formatted), range(febData$Voltage), type="n", ylab = "Voltage", xlab="datetime")
lines(febData$formatted, febData$Voltage)
##-------------- #3
## plot the 3 different sub meters against new date/time column
## Fix: label typo "Engergy" -> "Energy"
plot(range(febData$formatted), range(febData$Sub_metering_1, febData$Sub_metering_2, febData$Sub_metering_3), type="n", ylab = "Energy sub metering", xlab="")
lines(febData$formatted, febData$Sub_metering_1, col = "black")
lines(febData$formatted, febData$Sub_metering_2, col = "red")
lines(febData$formatted, febData$Sub_metering_3, col = "blue")
## add a legend, reduce its size so it is visible on png
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1, bty = "n", cex = 0.7)
##-------------- #4
plot(range(febData$formatted), range(febData$Global_reactive_power), type="n", ylab = "Global_reactive_power", xlab="datetime")
lines(febData$formatted, febData$Global_reactive_power)
## copy plot to png file, default size is 480 x 480
dev.copy(png, "plot4.png")
dev.off() | /plot4.R | no_license | firmbeliever/MiscCourseraRWork | R | false | false | 2,132 | r | ## read just the data for 2007-02-01 and 2007-02-02
## starts on row 66638, includes 2880 rows
## include one extra row for this chart to get Saturday cut-off on the plots
febData <- read.table("household_power_consumption.txt", header = FALSE, na.strings = "?", sep = ";", skip = 66637, nrows = 2881)
## read and assign the column names
colnms <- read.table("household_power_consumption.txt", sep = ";", nrows = 1)
names(febData) <- unlist(colnms)
## convert Date column
febData$Date <- as.Date(febData$Date, "%d/%m/%Y")
## Add new column with date/time properly formatted
## Fix: the original used dplyr::mutate() without loading dplyr, which would
## fail; plain base R assignment produces the same column.
febData$formatted <- as.POSIXct(paste(febData$Date, febData$Time))
## include 4 plots on this device, fill by row
par(mfrow=c(2,2))
##-------------- #1
## plot global active power against new date/time column
plot(range(febData$formatted), range(febData$Global_active_power), type="n", ylab = "Global Active Power (kilowatts)", xlab="")
lines(febData$formatted, febData$Global_active_power)
##-------------- #2
plot(range(febData$formatted), range(febData$Voltage), type="n", ylab = "Voltage", xlab="datetime")
lines(febData$formatted, febData$Voltage)
##-------------- #3
## plot the 3 different sub meters against new date/time column
## Fix: label typo "Engergy" -> "Energy"
plot(range(febData$formatted), range(febData$Sub_metering_1, febData$Sub_metering_2, febData$Sub_metering_3), type="n", ylab = "Energy sub metering", xlab="")
lines(febData$formatted, febData$Sub_metering_1, col = "black")
lines(febData$formatted, febData$Sub_metering_2, col = "red")
lines(febData$formatted, febData$Sub_metering_3, col = "blue")
## add a legend, reduce its size so it is visible on png
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1, bty = "n", cex = 0.7)
##-------------- #4
plot(range(febData$formatted), range(febData$Global_reactive_power), type="n", ylab = "Global_reactive_power", xlab="datetime")
lines(febData$formatted, febData$Global_reactive_power)
## copy plot to png file, default size is 480 x 480
dev.copy(png, "plot4.png")
dev.copy(png, "plot4.png")
dev.off() |
#' @title Confusion Matrix
#' @description Generate a confusion matrix for Random Forest analyses with
#' error rates translated into percent correctly classified, and columns for
#' confidence intervals and expected classification rates (priors) added.
#'
#' @param rf a \code{\link[randomForest]{randomForest}} object.
#' @param conf.level confidence level for the \code{\link{binom.test}} confidence interval
#' @param threshold threshold to test observed classification probability against.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @seealso \code{\link{classConfInt}}
#'
#' @examples
#' data(mtcars)
#'
#' rf <- randomForest(factor(am) ~ ., mtcars, importance = TRUE)
#' confusionMatrix(rf)
#'
#' @export
#'
confusionMatrix <- function(rf, conf.level = 0.95, threshold = 0.8) {
  conf <- rf$confusion
  # Strip error rate column (the last column of randomForest's confusion matrix)
  conf <- conf[, -ncol(conf)]
  # Get confidence intervals (one row per class, per classConfInt)
  ci <- classConfInt(rf, conf.level = conf.level, threshold = threshold)
  # Get expected error rate (prior)
  prior <- exptd.err.rate(rf)
  # Convert error rates to percent correctly classified; the reindexing rotates
  # the first element to the end -- NOTE(review): presumably so the overall rate
  # lines up with the appended "Overall" row; confirm against exptd.err.rate().
  prior <- (1 - prior[c(2:length(prior), 1)]) * 100
  # Add an "Overall" row (NA counts), then bind CI (as percent) and prior columns
  conf <- rbind(conf, Overall = rep(NA, ncol(conf)))
  cbind(conf, ci * 100, Prior = prior)
} | /R/confusionMatrix.R | no_license | katielong/rfPermute | R | false | false | 1,235 | r | #' @title Confusion Matrix
#' @description Generate a confusion matrix for Random Forest analyses with
#' error rates translated into percent correctly classified, and columns for
#' confidence intervals and expected classification rates (priors) added.
#'
#' @param rf a \code{\link[randomForest]{randomForest}} object.
#' @param conf.level confidence level for the \code{\link{binom.test}} confidence interval
#' @param threshold threshold to test observed classification probability against.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @seealso \code{\link{classConfInt}}
#'
#' @examples
#' data(mtcars)
#'
#' rf <- randomForest(factor(am) ~ ., mtcars, importance = TRUE)
#' confusionMatrix(rf)
#'
#' @export
#'
confusionMatrix <- function(rf, conf.level = 0.95, threshold = 0.8) {
conf <- rf$confusion
# Strip error rate column
conf <- conf[, -ncol(conf)]
# Get confidence intervals
ci <- classConfInt(rf, conf.level = conf.level, threshold = threshold)
# Get expected error rate (prior)
prior <- exptd.err.rate(rf)
prior <- (1 - prior[c(2:length(prior), 1)]) * 100
# Add rows and columns
conf <- rbind(conf, Overall = rep(NA, ncol(conf)))
cbind(conf, ci * 100, Prior = prior)
} |
#This function is able to cache the invers of a matrix as special object.
# Wrap a matrix in a closure that can memoise its inverse.
# Returns a list of accessors: set/get for the matrix itself and
# setInv/getInv for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      # Replacing the matrix invalidates any previously cached inverse
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setInv = function(solveMatrix) cached_inverse <<- solveMatrix,
    getInv = function() cached_inverse
  )
}
## Write a short comment describing this function
#This function calculates the inverse of the matrix returned by makeCacheMatrix function defined before.
# Compute (or retrieve from cache) the inverse of the matrix wrapped by
# a makeCacheMatrix object `x`. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inversa <- x$getInv()
  if(!is.null(inversa)){
    message("Getting chache...hold on")
    return(inversa)
  }
  data <- x$get()
  inversa <- solve(data, ...)
  ## Fix: the original cached and returned the stale NULL `inversa` instead of
  ## the freshly computed inverse, so nothing was ever cached or returned.
  x$setInv(inversa)
  inversa
}
| /cachematrix.R | no_license | makise05/ProgrammingAssignment2 | R | false | false | 796 | r | #This function is able to cache the invers of a matrix as special object.
makeCacheMatrix <- function(x = matrix()) {
inversa <- NULL
set <- function(y) {
x <<- y
inversa <<- NULL
}
get <- function() x
setInv <- function(solveMatrix) inversa <<- solveMatrix
getInv <- function() inversa
list(set = set, get = get,setInv = setInv, getInv = getInv)
}
## Write a short comment describing this function
#This function calculates the inverse of the matrix returned by makeCacheMatrix function defined before.
# Compute (or retrieve from cache) the inverse of the matrix wrapped by
# a makeCacheMatrix object `x`. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inversa <- x$getInv()
  if(!is.null(inversa)){
    message("Getting chache...hold on")
    return(inversa)
  }
  data <- x$get()
  inversa <- solve(data, ...)
  ## Fix: the original cached and returned the stale NULL `inversa` instead of
  ## the freshly computed inverse, so nothing was ever cached or returned.
  x$setInv(inversa)
  inversa
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap_auc.R
\name{.expand_study}
\alias{.expand_study}
\title{Expand study section for \code{SignatureInfoTraining}}
\usage{
.expand_study(GSE_sig)
}
\arguments{
\item{GSE_sig}{A \code{data.frame} contains information about each signature
and its training/discovery dataset(s) name. Default is \code{NULL}.}
}
\value{
An expanded \code{data.frame} of gene signatures and their datasets.
}
\description{
Expand study section for \code{SignatureInfoTraining}
}
| /man/dot-expand_study.Rd | permissive | SpadeKKK/curatedTBData | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap_auc.R
\name{.expand_study}
\alias{.expand_study}
\title{Expand study section for \code{SignatureInfoTraining}}
\usage{
.expand_study(GSE_sig)
}
\arguments{
\item{GSE_sig}{A \code{data.frame} contains information about each signature
and its training/discovery dataset(s) name. Default is \code{NULL}.}
}
\value{
An expanded \code{data.frame} of gene signatures and their datasets.
}
\description{
Expand study section for \code{SignatureInfoTraining}
}
|
#' String to numeric vector
#'
#' @param string A string containing numbers
#' @param sep The separator between numbers
#'
#' @export
str2vec <- function(string, sep = ' ') {
  # Split on the separator, flatten, and coerce each piece to numeric
  pieces <- strsplit(as.character(string), sep)
  as.numeric(unlist(pieces))
}
| /R/str2vec.R | no_license | rscherrer/EGS | R | false | false | 238 | r | #' String to numeric vector
#'
#' @param string A string containing numbers
#' @param sep The separator between numbers
#'
#' @export
str2vec <- function(string, sep = ' ') {
as.numeric(unlist(strsplit(as.character(string), sep)))
}
|
#' Estimate time-varying measures and forecast
#'
#' @param nowcast A nowcast as produced by `nowcast_pipeline`
#' @param rt_windows Numeric vector, windows over which to estimate time-varying R. The best performing window will be
#' selected per serial interval sample by default (based on which window best forecasts current cases).
#' @param rate_window Numeric, the window to use to estimate the rate of spread.
#' @inheritParams estimate_R0
#' @return
#' @export
#' @importFrom tidyr gather nest unnest drop_na
#' @importFrom dplyr filter group_by ungroup mutate select summarise n group_split bind_rows arrange
#' @importFrom purrr safely compact map_dbl map pmap transpose
#' @importFrom HDInterval hdi
#' @importFrom furrr future_map
#' @importFrom data.table setDT
#' @examples
#'
epi_measures_pipeline <- function(nowcast = NULL,
                                  serial_intervals = NULL,
                                  min_est_date = NULL,
                                  si_samples = NULL, rt_samples = NULL,
                                  rt_windows = 7, rate_window = 7,
                                  rt_prior = NULL, forecast_model = NULL,
                                  horizon = NULL) {
  ## Estimate time-varying R0
  ## safely() means a failed estimate yields NULL instead of aborting the
  ## whole pipeline; NULL results are dropped via purrr::compact() below.
  safe_R0 <- purrr::safely(EpiNow::estimate_R0)
  message("Estimate time-varying R0")
  ## One R0 estimation per (type, sample) combination, mapped in parallel
  data_list <- dplyr::group_split(nowcast, type, sample, keep = TRUE)
  estimates <- furrr::future_map(data_list, function(data) {
    estimates <- safe_R0(cases = data,
                          serial_intervals = serial_intervals,
                          rt_prior = rt_prior,
                          si_samples = si_samples,
                          rt_samples = rt_samples,
                          windows = rt_windows,
                          min_est_date = min_est_date,
                          forecast_model = forecast_model,
                          horizon = horizon)[[1]]
    ## Tag results with the grouping keys so they can be recombined later
    if (!is.null(estimates$rts)) {
      estimates$rts <- dplyr::mutate(estimates$rts[[1]], type = data$type[1],
                                      sample = data$sample[1])
    }
    if (!is.null(estimates$cases)) {
      estimates$cases <- dplyr::mutate(estimates$cases[[1]], type = data$type[1],
                                        sample = data$sample[1])
    }
    return(estimates)
  }, .progress = TRUE)
  ## Clean up NULL rt estimates and bind together
  R0_estimates <- estimates %>%
    purrr::map(~ .$rts) %>%
    purrr::compact() %>%
    dplyr::bind_rows()
  ## Generic HDI return function
  ## index = 1 extracts the lower bound of the interval, index = 2 the upper
  return_hdi <- function(vect = NULL, mass = NULL, index = NULL) {
    as.numeric(purrr::map_dbl(list(HDInterval::hdi(vect, credMass = mass)), ~ .[[index]]))
  }
  message("Summarising time-varying R0")
  ## Summarise the posterior R samples per (type, date, rt_type):
  ## 90% and 50% highest-density intervals, central moments, and the
  ## probability of control (share of samples with R < 1).
  R0_estimates_sum <- data.table::setDT(R0_estimates)[, .(
    bottom  = return_hdi(R, 0.9, 1),
    top = return_hdi(R, 0.9, 2),
    lower  = return_hdi(R, 0.5, 1),
    upper = return_hdi(R, 0.5, 2),
    median = median(R, na.rm = TRUE),
    mean = mean(R, na.rm = TRUE),
    std = sd(R, na.rm = TRUE),
    prob_control = (sum(R < 1) / .N),
    mean_window = mean(window),
    sd_window = sd(window)),
    by = .(type, date, rt_type)
    ][, R0_range := purrr::pmap(
      list(mean, bottom, top, lower, upper),
      function(mean, bottom, top, lower, upper) {
        list(point = mean,
             lower = bottom,
             upper = top,
             mid_lower = lower,
             mid_upper = upper)
      }),]
  R0_estimates_sum <- dplyr::arrange(R0_estimates_sum, date)
  message("Summarising forecast cases")
  cases_forecast <- estimates %>%
    purrr::map(~ .$cases) %>%
    purrr::compact()
  ## Only summarise forecasts when at least one estimate produced them
  if (!(is.null(cases_forecast) | length(cases_forecast) == 0)) {
    ## Clean up case forecasts
    cases_forecast <- cases_forecast %>%
      dplyr::bind_rows()
    ## Summarise case forecasts (same interval structure as for R0 above)
    sum_cases_forecast <- data.table::setDT(cases_forecast)[, .(
      bottom = return_hdi(cases, 0.9, 1),
      top = return_hdi(cases, 0.9, 2),
      lower = return_hdi(cases, 0.5, 1),
      upper = return_hdi(cases, 0.5, 2),
      median = as.numeric(median(cases, na.rm = TRUE)),
      mean = as.numeric(mean(cases, na.rm = TRUE)),
      std = as.numeric(sd(cases, na.rm = TRUE))),
      by = .(type, date, rt_type)
      ][, range := purrr::pmap(
        list(mean, bottom, top),
        function(mean, bottom, top) {
          list(point = mean,
               lower = bottom,
               upper = top)
        }),]
    sum_cases_forecast <- dplyr::arrange(sum_cases_forecast, date)
  }
  ## Estimate time-varying little r (the rate of growth)
  message("Estimate time-varying rate of growth")
  ## Pad the window back before min_est_date so the first estimate has data
  if (!is.null(min_est_date)) {
    little_r_estimates <- nowcast %>%
      dplyr::filter(date >= (min_est_date - lubridate::days(rate_window)))
  }else{
    little_r_estimates <- nowcast
  }
  ## Sum across cases and imports
  little_r_estimates <- little_r_estimates %>%
    group_by(type, sample, date) %>%
    dplyr::summarise(cases = sum(cases, na.rm = TRUE)) %>%
    dplyr::ungroup() %>%
    tidyr::drop_na()
  ## Nest by type and sample then split by type only
  little_r_estimates_list <- little_r_estimates %>%
    dplyr::group_by(type, sample) %>%
    tidyr::nest() %>%
    dplyr::ungroup() %>%
    dplyr::group_split(type, keep = TRUE)
  ## Pull out unique list (one result row per case type)
  little_r_estimates_res <- little_r_estimates %>%
    dplyr::select(type) %>%
    unique()
  ## Estimate overall growth rate across the whole window
  little_r_estimates_res$overall_little_r <- furrr::future_map(little_r_estimates_list,
                                                               ~ EpiNow::estimate_r_in_window(.$data),
                                                               .progress = TRUE)
  ## Estimate time-varying growth rate over a sliding window
  little_r_estimates_res$time_varying_r <- furrr::future_map(little_r_estimates_list,
                                                             ~ EpiNow::estimate_time_varying_r(.$data,
                                                                                               window = rate_window),
                                                             .progress = TRUE)
  ## Assemble the named output list; forecast entries are only present when
  ## a forecast model produced case forecasts above.
  out <- list(R0_estimates_sum, little_r_estimates_res, R0_estimates)
  names(out) <- c("R0", "rate_of_spread", "raw_R0")
  if (!(is.null(cases_forecast) | length(cases_forecast) == 0)) {
    out$case_forecast <- sum_cases_forecast
    out$raw_case_forecast <- cases_forecast
  }
  return(out)
}
| /R/epi_measures_pipeline.R | permissive | akira-endo/EpiNow | R | false | false | 6,359 | r | #' Estimate time-varying measures and forecast
#'
#' @param nowcast A nowcast as produced by `nowcast_pipeline`
#' @param rt_windows Numeric vector, windows over which to estimate time-varying R. The best performing window will be
#' selected per serial interval sample by default (based on which window best forecasts current cases).
#' @param rate_window Numeric, the window to use to estimate the rate of spread.
#' @inheritParams estimate_R0
#' @return A named list: "R0" (summarised time-varying R0 estimates),
#'   "rate_of_spread" (overall and time-varying growth-rate estimates per type),
#'   "raw_R0" (raw R0 samples) and, when forecasts were produced,
#'   "case_forecast" / "raw_case_forecast".
#' @export
#' @importFrom tidyr gather nest unnest drop_na
#' @importFrom dplyr filter group_by ungroup mutate select summarise n group_split bind_rows arrange
#' @importFrom purrr safely compact map_dbl map pmap transpose
#' @importFrom HDInterval hdi
#' @importFrom furrr future_map
#' @importFrom data.table setDT
#' @examples
#'
epi_measures_pipeline <- function(nowcast = NULL,
                                  serial_intervals = NULL,
                                  min_est_date = NULL,
                                  si_samples = NULL, rt_samples = NULL,
                                  rt_windows = 7, rate_window = 7,
                                  rt_prior = NULL, forecast_model = NULL,
                                  horizon = NULL) {
  ## Estimate time-varying R0 for each nowcast type / posterior sample in
  ## parallel. estimate_R0 is wrapped with purrr::safely so a failure in one
  ## split does not abort the whole pipeline (failed splits yield NULL).
  safe_R0 <- purrr::safely(EpiNow::estimate_R0)
  message("Estimate time-varying R0")
  data_list <- dplyr::group_split(nowcast, type, sample, keep = TRUE)
  estimates <- furrr::future_map(data_list, function(data) {
    estimates <- safe_R0(cases = data,
                         serial_intervals = serial_intervals,
                         rt_prior = rt_prior,
                         si_samples = si_samples,
                         rt_samples = rt_samples,
                         windows = rt_windows,
                         min_est_date = min_est_date,
                         forecast_model = forecast_model,
                         horizon = horizon)[[1]]
    ## Tag results with the type / sample they came from so they can be
    ## recombined after the parallel map.
    if (!is.null(estimates$rts)) {
      estimates$rts <- dplyr::mutate(estimates$rts[[1]], type = data$type[1],
                                     sample = data$sample[1])
    }
    if (!is.null(estimates$cases)) {
      estimates$cases <- dplyr::mutate(estimates$cases[[1]], type = data$type[1],
                                       sample = data$sample[1])
    }
    return(estimates)
  }, .progress = TRUE)
  ## Clean up NULL rt estimates (failed splits) and bind together
  R0_estimates <- estimates %>%
    purrr::map(~ .$rts) %>%
    purrr::compact() %>%
    dplyr::bind_rows()
  ## Generic helper: extract one bound of the highest-density interval with
  ## the given credible mass (index 1 = lower bound, index 2 = upper bound).
  return_hdi <- function(vect = NULL, mass = NULL, index = NULL) {
    as.numeric(purrr::map_dbl(list(HDInterval::hdi(vect, credMass = mass)), ~ .[[index]]))
  }
  message("Summarising time-varying R0")
  ## Summarise R samples per type / date / rt_type: 90% and 50% HDIs, central
  ## tendency, and the probability the epidemic is under control (R < 1).
  R0_estimates_sum <- data.table::setDT(R0_estimates)[, .(
    bottom = return_hdi(R, 0.9, 1),
    top = return_hdi(R, 0.9, 2),
    lower = return_hdi(R, 0.5, 1),
    upper = return_hdi(R, 0.5, 2),
    median = median(R, na.rm = TRUE),
    mean = mean(R, na.rm = TRUE),
    std = sd(R, na.rm = TRUE),
    prob_control = (sum(R < 1) / .N),
    mean_window = mean(window),
    sd_window = sd(window)),
    by = .(type, date, rt_type)
    ][, R0_range := purrr::pmap(
      list(mean, bottom, top, lower, upper),
      function(mean, bottom, top, lower, upper) {
        list(point = mean,
             lower = bottom,
             upper = top,
             mid_lower = lower,
             mid_upper = upper)
      }),]
  R0_estimates_sum <- dplyr::arrange(R0_estimates_sum, date)
  message("Summarising forecast cases")
  cases_forecast <- estimates %>%
    purrr::map(~ .$cases) %>%
    purrr::compact()
  ## Case forecasts are only present when estimate_R0 produced them
  ## (i.e. when a forecast model was in use).
  if (!(is.null(cases_forecast) || length(cases_forecast) == 0)) {
    ## Clean up case forecasts
    cases_forecast <- cases_forecast %>%
      dplyr::bind_rows()
    ## Summarise case forecasts
    sum_cases_forecast <- data.table::setDT(cases_forecast)[, .(
      bottom = return_hdi(cases, 0.9, 1),
      top = return_hdi(cases, 0.9, 2),
      lower = return_hdi(cases, 0.5, 1),
      upper = return_hdi(cases, 0.5, 2),
      median = as.numeric(median(cases, na.rm = TRUE)),
      mean = as.numeric(mean(cases, na.rm = TRUE)),
      std = as.numeric(sd(cases, na.rm = TRUE))),
      by = .(type, date, rt_type)
      ][, range := purrr::pmap(
        list(mean, bottom, top),
        function(mean, bottom, top) {
          list(point = mean,
               lower = bottom,
               upper = top)
        }),]
    sum_cases_forecast <- dplyr::arrange(sum_cases_forecast, date)
  }
  ## Estimate time-varying little r (rate of growth)
  message("Estimate time-varying rate of growth")
  ## Keep one rate_window of data before min_est_date so the first windowed
  ## estimate is based on a full window.
  if (!is.null(min_est_date)) {
    little_r_estimates <- nowcast %>%
      dplyr::filter(date >= (min_est_date - lubridate::days(rate_window)))
  } else {
    little_r_estimates <- nowcast
  }
  ## Sum across cases and imports
  little_r_estimates <- little_r_estimates %>%
    dplyr::group_by(type, sample, date) %>%
    dplyr::summarise(cases = sum(cases, na.rm = TRUE)) %>%
    dplyr::ungroup() %>%
    tidyr::drop_na()
  ## Nest by type and sample then split by type only
  little_r_estimates_list <- little_r_estimates %>%
    dplyr::group_by(type, sample) %>%
    tidyr::nest() %>%
    dplyr::ungroup() %>%
    dplyr::group_split(type, keep = TRUE)
  ## Pull out unique types as the skeleton of the result
  little_r_estimates_res <- little_r_estimates %>%
    dplyr::select(type) %>%
    unique()
  ## Estimate overall growth rate (one estimate per type)
  little_r_estimates_res$overall_little_r <- furrr::future_map(little_r_estimates_list,
                                                               ~ EpiNow::estimate_r_in_window(.$data),
                                                               .progress = TRUE)
  ## Estimate time-varying growth rate over a sliding window
  little_r_estimates_res$time_varying_r <- furrr::future_map(little_r_estimates_list,
                                                             ~ EpiNow::estimate_time_varying_r(.$data,
                                                                                               window = rate_window),
                                                             .progress = TRUE)
  out <- list(R0_estimates_sum, little_r_estimates_res, R0_estimates)
  names(out) <- c("R0", "rate_of_spread", "raw_R0")
  if (!(is.null(cases_forecast) || length(cases_forecast) == 0)) {
    out$case_forecast <- sum_cases_forecast
    out$raw_case_forecast <- cases_forecast
  }
  return(out)
}
|
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four closures that share the wrapped matrix and its
## cached inverse through the enclosing environment:
##   set(y)        -- replace the matrix and invalidate the cached inverse
##   get()         -- return the current matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## Replacing the matrix makes any previously cached inverse stale.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the special "matrix" created with makeCacheMatrix.
## If the inverse has already been computed (and the matrix has not changed
## since), the cached value is returned and the computation is skipped.
## Otherwise the inverse is computed with solve(), stored in the cache via
## setinverse(), and returned.
##
## Fix: `...` was accepted by the signature but silently dropped; it is now
## forwarded to solve() so callers can pass options such as `tol`.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | cguduru/ProgrammingAssignment2 | R | false | false | 1,097 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of inverse of the matrix
## 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## The following function returns the inverse of the matrix. It first checks if
## the inverse has already been computed. If so, it gets the result and skips the
## computation. If not, it computes the inverse, sets the value in the cache via
## setinverse function.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
|
source("R\\UCTSUpload.R")
# Regression tests for th UCTSUpload routine
#
# Test the password encryption function which should return "134060072035020251227029" for "A1B2c3d5"
#
#
stopifnot(EncryptPassword("A1B2c3d5") == "134060072035020251227029")
# Test the post string generation code
testData <- xts(x=c(1, 2.2, 3.12345, 4.5), order.by = as.Date(c("2014-04-22","2014-04-23","2014-04-24","2014-04-25")))
#Try uploading a real dataset
sPost <- UCTSUpload(TSCode="TSTEST01",
MGMTGroup="TEST",
freq = "D",
seriesName="Automatic Upload Test",
Units="par",
Decimals=2,
ActPer="Y",
freqConversion="END",
Alignment="MID",
Carry="NO",
PrimeCurr="U$",
tsData=testData,
strUsername=options()$Datastream.Username,
strPassword=options()$Datastream.Password)
sExpected <- TRUE
stopifnot(sPost == sExpected)
message("Unit tests passed")
# Test a dataset with an NaN in it
testData <- xts(x=c(1, 2.2, 3.12345, 14.5, NaN), order.by = as.Date(c("2013-01-01","2013-02-01","2013-03-01","2013-04-01","2013-05-01")))
sPost <- getTimeseries(testData,"M",2,"NA")
sExpected <- "1.00,2.20,3.12,14.50,NA,"
stopifnot(sPost == sExpected)
#Try uploading a real dataset
sPost <- UCTSUpload(TSCode="TSTEST01",
MGMTGroup="TEST",
freq = "M",
seriesName="Automatic Upload Test",
Units="par",
Decimals=2,
ActPer="Y",
freqConversion="END",
Alignment="MID",
Carry="NO",
PrimeCurr="U$",
tsData=testData,
strUsername=options()$Datastream.Username,
strPassword=options()$Datastream.Password)
sExpected <- TRUE
stopifnot(sPost == sExpected)
#Test the daily data
#Load some test data
load("Other\\f.RData")
fTest<-head(f$First,10)
# Test getTimeseries for the first 10 points
tData <- getTimeseries(Data=fTest, freq="D", digits=4, NA_VALUE="NA")
tDataExpected <- "0.8559,NA,NA,NA,0.8579,0.8512,0.8599,NA,NA,0.8596,NA,0.8393,0.8406,0.8274,0.8505,0.8444,"
stopifnot(tData == tDataExpected)
#Try a round trip and check if data is the same
sPost <- UCTSUpload(TSCode="TSTEST01",
MGMTGroup="TEST",
freq = "D",
seriesName="Automatic Upload Test",
Units="",
Decimals=3,
ActPer="Y",
freqConversion="END",
Alignment="END",
Carry="NO",
PrimeCurr="",
tsData=fTest,
strUsername=options()$Datastream.Username,
strPassword=options()$Datastream.Password)
stopifnot(sPost == TRUE) #Failed to upload
#Now lets download the data
dwei <- getDataStream(User=options()$Datastream.Username, Pass=options()$Datastream.Password)
sGet <- timeSeriesRequest(dwei = dwei,
DSCodes = "TSTEST01",
Instrument = "",
startDate = index(first(fTest)),
endDate = index(last(fTest)),
frequency = "D",
sStockList = sTest,
aTimeSeries = aTS,
verbose = FALSE)
#So success is aTS is the same as f$First
xResult <- cbind(round(fTest,digits=3),aTS) # Need to round to the same number of digits as in upload
colnames(xResult) <- c("Sent","Got")
stopifnot(!FALSE %in% as.vector(xResult$Sent==xResult$Got))
message("Unit tests passed")
| /Other/TestUCTSUpload.R | no_license | miceli/Datastream2R | R | false | false | 3,895 | r | source("R\\UCTSUpload.R")
# Regression tests for the UCTSUpload routine.
#
# Plain-script test harness: each stopifnot() aborts the run on failure, so
# reaching the final message() means every check passed.  The upload and
# download checks talk to the live Datastream service and need credentials in
# options()$Datastream.Username / options()$Datastream.Password.
#
# Test the password encryption function, which should return
# "134060072035020251227029" for "A1B2c3d5".
stopifnot(EncryptPassword("A1B2c3d5") == "134060072035020251227029")
# Test the post string generation code with four daily observations.
testData <- xts(x=c(1, 2.2, 3.12345, 4.5), order.by = as.Date(c("2014-04-22","2014-04-23","2014-04-24","2014-04-25")))
# Try uploading a real dataset (daily frequency); UCTSUpload returns TRUE on success.
sPost <- UCTSUpload(TSCode="TSTEST01",
                    MGMTGroup="TEST",
                    freq = "D",
                    seriesName="Automatic Upload Test",
                    Units="par",
                    Decimals=2,
                    ActPer="Y",
                    freqConversion="END",
                    Alignment="MID",
                    Carry="NO",
                    PrimeCurr="U$",
                    tsData=testData,
                    strUsername=options()$Datastream.Username,
                    strPassword=options()$Datastream.Password)
sExpected <- TRUE
stopifnot(sPost == sExpected)
message("Unit tests passed")
# Test a dataset with an NaN in it: getTimeseries should render NaN as "NA"
# and round to the requested number of decimals.
testData <- xts(x=c(1, 2.2, 3.12345, 14.5, NaN), order.by = as.Date(c("2013-01-01","2013-02-01","2013-03-01","2013-04-01","2013-05-01")))
sPost <- getTimeseries(testData,"M",2,"NA")
sExpected <- "1.00,2.20,3.12,14.50,NA,"
stopifnot(sPost == sExpected)
# Try uploading the NaN-containing dataset (monthly frequency).
sPost <- UCTSUpload(TSCode="TSTEST01",
                    MGMTGroup="TEST",
                    freq = "M",
                    seriesName="Automatic Upload Test",
                    Units="par",
                    Decimals=2,
                    ActPer="Y",
                    freqConversion="END",
                    Alignment="MID",
                    Carry="NO",
                    PrimeCurr="U$",
                    tsData=testData,
                    strUsername=options()$Datastream.Username,
                    strPassword=options()$Datastream.Password)
sExpected <- TRUE
stopifnot(sPost == sExpected)
# Test the daily data.
# Load some test data from disk (object f; its First column is used below).
load("Other\\f.RData")
fTest<-head(f$First,10)
# Test getTimeseries for the first 10 points; gaps in the daily series are
# expected to appear as "NA" entries in the generated string.
tData <- getTimeseries(Data=fTest, freq="D", digits=4, NA_VALUE="NA")
tDataExpected <- "0.8559,NA,NA,NA,0.8579,0.8512,0.8599,NA,NA,0.8596,NA,0.8393,0.8406,0.8274,0.8505,0.8444,"
stopifnot(tData == tDataExpected)
# Try a round trip (upload then download) and check the data survives intact.
sPost <- UCTSUpload(TSCode="TSTEST01",
                    MGMTGroup="TEST",
                    freq = "D",
                    seriesName="Automatic Upload Test",
                    Units="",
                    Decimals=3,
                    ActPer="Y",
                    freqConversion="END",
                    Alignment="END",
                    Carry="NO",
                    PrimeCurr="",
                    tsData=fTest,
                    strUsername=options()$Datastream.Username,
                    strPassword=options()$Datastream.Password)
stopifnot(sPost == TRUE) # Stops here if the upload failed
# Now download the data we just uploaded.
dwei <- getDataStream(User=options()$Datastream.Username, Pass=options()$Datastream.Password)
sGet <- timeSeriesRequest(dwei = dwei,
                          DSCodes = "TSTEST01",
                          Instrument = "",
                          startDate = index(first(fTest)),
                          endDate = index(last(fTest)),
                          frequency = "D",
                          sStockList = sTest,
                          aTimeSeries = aTS,
                          verbose = FALSE)
# Success means aTS matches f$First value-for-value.
xResult <- cbind(round(fTest,digits=3),aTS) # Need to round to the same number of digits as in upload
colnames(xResult) <- c("Sent","Got")
stopifnot(!FALSE %in% as.vector(xResult$Sent==xResult$Got))
message("Unit tests passed")
|
library(import5eChar) # github.com/oganm/import5eChar
library(purrr)
library(readr)
library(glue)
library(digest)
library(dplyr)
library(XML)
library(ogbox) # github.com/oganm/ogbox
library(wizaRd) # github.com/oganm/wizaRd
library(stringr)
library(memoise)
library(rgeolocate)
library(here)
library(data.table)
library(randomIDs) # add friendlier names. github.com/oganm/randomIDs
library(jsonlite)
usethis::use_data_raw()
# Change the working directory to the directory of the script that was
# launched via `Rscript --file=...`.  Called for its side effect (setwd);
# does nothing when no single matching argument is found (e.g. in an
# interactive session).
set_file_wd = function(){
  cmd_args = commandArgs(trailingOnly = FALSE)
  # Strip the "--file=" prefix from the matching argument to get the path.
  script_path = gsub('--file=', '', cmd_args[grepl('--file', cmd_args)])
  if(length(script_path) == 1){
    setwd(dirname(script_path))
  }
}
set_file_wd()
setwd(here())
# memoisation for quick access
# fc <- cache_filesystem("data-raw/memoiseCache")
# memoImportChar = memoise(importCharacter, cache = fc)
if(file.exists('memoImportChar.rds')){
memoImportChar = readRDS(here('memoImportChar.rds'))
} else {
memoImportChar = memoise(importCharacter)
saveRDS(memoImportChar,'memoImportChar.rds')
}
# get all char files saved everywhere. Yes I made a mess that I refused to fix...
charFiles = c(list.files('/srv/shiny-server/printSheetApp/chars/',full.names = TRUE),
list.files('/srv/shiny-server/interactiveSheet/chars/',full.names = TRUE),
list.files('/srv/shiny-server/chars',full.names = TRUE),
list.files('/srv/shiny-server/chars2', full.names = TRUE),
list.files('/srv/shiny-server/chars3', full.names = TRUE),
list.files('/srv/shiny-server/chars4', full.names = TRUE))
print('reading char files')
fileInfo = file.info(charFiles)
charFiles = charFiles[order(fileInfo$mtime)]
fileInfo = fileInfo[order(fileInfo$mtime),]
# use import5eChar to read the all of them
chars = charFiles %>% lapply(function(x){
memoImportChar(file = x)
})
saveRDS(memoImportChar,'memoImportChar.rds')
# get date information. dates before 2018-04-16 are not reliable
# get user fingerprint and IP
fileData = charFiles %>% basename %>% strsplit('_')
# add file and user info to the characters
print('constructing char table')
chars = lapply(1:length(chars),function(i){
char = chars[[i]]
char$date = fileInfo$mtime[i]
if(length(fileData[[i]]) == 1){
char$ip = 'NULL'
char$finger = 'NULL'
char$hash = fileData[[i]]
} else{
char$finger = fileData[[i]][1]
char$ip = fileData[[i]][2]
char$hash = fileData[[i]][3]
}
char
})
# setting the names to character name and class. this won't be exposed to others
names(chars) = chars %>% map_chr(function(x){
paste(x$Name,x$ClassField)
})
# create the table. it initially creates the table because that's what my original pipeline did... later I will convert the
# relevant bits into a list, making this a little silly.
charTable = chars %>% map(function(x){
data.frame(ip = x$ip,
finger = x$finger,
hash = x$hash,
name = x$Name,
race = x$Race,
background = x$Background,
date = x$date,
class = paste(x$classInfo[,1],x$classInfo[,3],collapse='|'),
justClass = x$classInfo[,'Class'] %>% paste(collapse ='|'),
subclass = x$classInfo[,'Archetype'] %>% paste(collapse ='|'),
classFreeText = x$ClassField,
level = x$classInfo[,'Level'] %>% as.integer() %>% sum,
feats = x$feats[x$feats !=''] %>% paste(collapse = '|'),
HP = x$currentHealth,
AC = AC(x),
Str = x$abilityScores['Str'],
Dex = x$abilityScores['Dex'],
Con = x$abilityScores['Con'],
Int = x$abilityScores['Int'],
Wis = x$abilityScores['Wis'],
Cha = x$abilityScores['Cha'],
alignment = x$Alignment,
skills = x$skillProf %>% which %>% names %>% paste(collapse = '|'),
weapons = x$weapons %>% map_chr('name') %>% gsub("\\|","",.) %>% paste(collapse = '|'),
spells = glue('{x$spells$name %>% gsub("\\\\*|\\\\|","",.)}*{x$spells$level}') %>% glue_collapse('|') %>% {if(length(.)!=1){return('')}else{return(.)}},
# day = x$date %>% format('%m %d %Y'),
castingStat = names(x$abilityMods[x$castingStatCode+1]),
choices = paste(gsub('\\||/|\\*','',names(x$classChoices)),
sapply(lapply(x$classChoices,gsub,pattern = '\\||/|\\*', replacement = ''),
paste,collapse = '*'),
sep = "/",collapse = '|'),
stringsAsFactors = FALSE)
}) %>% do.call(rbind,.)
# get rid of characters who start with the character generator but continue to level up by hand (unpaid users)
freeTextLevel = charTable$classFreeText %>% str_extract_all('[0-9]+') %>% lapply(as.integer) %>% sapply(sum)
charTable %<>% filter(!(level == 1 & freeTextLevel !=1)) %>%
filter(class!='')
charTable %<>% select(-classFreeText)
# remove multiple occurances of the same file
charTable %<>% arrange(desc(date)) %>% filter(!duplicated(hash))
if(file.exists('memoIPgeolocate.rds')){
memoIPgeolocate = readRDS(here('memoIPgeolocate.rds'))
} else {
memoIPgeolocate = memoise(ipapi::geolocate)
saveRDS(memoIPgeolocate,'memoIPgeolocate.rds')
}
ipLocations = charTable$ip %>%
lapply(memoIPgeolocate,.progress = FALSE) %>%
rbindlist(fill = TRUE)
saveRDS(memoIPgeolocate,here('memoIPgeolocate.rds'))
charTable$country = ipLocations$country
charTable$countryCode = ipLocations$countryCode
# some experimentation with user location.
# file <- system.file("extdata","GeoLite2-Country.mmdb", package = "rgeolocate")
# results <- maxmind(charTable$ip, file, c("continent_name", "country_code", "country_name"))
# post processing -----
# the way races are encoded in the app is a little silly. sub-races are
# not recorded separately. essentially race information is lost other
# than a text field after it's effects are applied during creation.
# The text field is also not too consistent. For instance if you are a
# variant half elf it'll simply say "Variant" but if you are a variant human
# it'll only say human
# here, I define regex that matches races.
# kind of an overkill as only few races actually required special care
races = c(Aarakocra = 'Aarakocra',
Aasimar = 'Aasimar',
Bugbear= 'Bugbear',
Dragonborn = 'Dragonborn',
Dwarf = 'Dwarf',
Elf = '(?<!Half-)Elf|Drow',
Firbolg = 'Firbolg',
Genasi= 'Genasi',
Gith = 'Geth|Githzerai',
Gnome = 'Gnome',
Goblin='^Goblin$',
Goliath = 'Goliath',
'Half-Elf' = '(^Half-Elf$)|(^Variant$)',
'Half-Orc' = 'Half-Orc',
Halfling = 'Halfling',
Hobgoblin = 'Hobgoblin$',
Human = 'Human|Variant Human',
Kenku = 'Kenku',
Kobold = 'Kobold',
Lizardfolk = 'Lizardfolk',
Orc = '(?<!Half-)Orc',
'Yaun-Ti' = 'Serpentblood|Yuan-Ti',
Tabaxi = 'Tabaxi',
Tiefling ='Tiefling|Lineage',
Triton = 'Triton',
Turtle = 'Turtle|Tortle',
Vedalken = 'Violetken|Vedalken',
Minotaur = 'Minotaur',
Centaur = 'Centaur',
Loxodon = 'Elephantine|Luxodon|Loxodon',
`Simic hybrid` = 'Animal Hybrid|Simic Hybrid',
Warforged = 'Warforged|Envoy|Juggernaut|Juggeenaut',
Changeling = 'Changeling',
Shifter = 'Shifter',
Kalashtar = 'Kalashtar',
Eladrin = 'Eladrin')
align = list(NG = c('ng',
'"good"',
'good',
'neuteral good',
'neitral good',
'neutral good',
'netual good',
'nuetral goodt',
'neutral/good',
'neutral-good',
'nuetral good',
'nutral good',
'n good',
'\U0001f937 neutral good',
'neutral goodsskkd',
'n/g',
'neutral good',
'n/b',
'nb',
'neutral bueno',
'n. good'),
CG = c('chaotic good',
'caotica buena',
'chaotic good.',
'caótico bueno',
'cb',
'cg',
'chacotic good',
'c/g',
'good chaotic'),
LG = c('lawful good',
'l/g',
'l-g',
'lg',
'lawfull good',
'lawful goodness',
'lawfully good',
'legal bueno',
'legal good',
'lb'),
NN = c('neutral',
'neutral neutral',
'netral',
'n',
'true neutral',
'tn',
'true-neutral',
'leal neutro',
'nuetral',
'neutral verdadero',
'neutro',
'true nuetral'),
CN = c('chaotic neutral',
'neutral caotico',
'caotic neutral',
'chaotic-neutral',
'c/n',
'caótico neutro',
'chaotic netural',
'chaotic',
'cn',
'chaotic nuetral',
'chatoic neutral',
'neutral chaotic',
'chaotic - neutral',
'chaotic neutrall',
'caotico neutral',
'caótico neutral',
"тру хаотик"),
LN = c('lawful neutral',
'lawfull neutral',
'legal neutral',
'lawful neitral',
'lawful',
'lawful/neutral',
'leal e neutro',
'lawful - neutral',
'ln',
'l/n',
'lawful neutral (good-ish)'),
NE = c('neutral evil','ne','n/e',
'neutral malvado',
'neutral maligno'),
LE = c('lawful evil',
'lawfuo evil',
'lawful evik',
'le',
'legal malvado',
'l/e'),
CE = c('ce',
'chaotic evil',
'caótico malvado',
'caotico maligno'
))
goodEvil = list(`E` = c('NE','LE','CE'),
`N` = c('LN','CN','NN'),
`G` = c('NG','LG','CG'))
lawfulChaotic = list(`C` = c('CN','CG','CE'),
`N` = c('NG','NE','NN'),
`L` = c('LG','LE','LN'))
# lists any alignment text I'm not processing
charTable$alignment %>% {.[!tolower(trimws(.)) %in% unlist(align)]} %>% table %>% sort %>% names %>% tolower %>% trimws
# Map a free-text alignment string onto its canonical code using the lookup
# `legend` (a named list whose elements are vectors of accepted spellings).
# The input is trimmed and lower-cased before matching; returns '' when
# nothing in the legend matches.
# NOTE(review): relies on findInList from ogbox being attached at the top of
# this script.
checkAlignment = function(x,legend){
  matched = names(legend)[findInList(tolower(trimws(x)), legend)]
  if (length(matched) > 0) {
    return(matched)
  }
  ''
}
charTable %<>% mutate(processedAlignment = alignment %>% purrr::map_chr(checkAlignment,align),
good = processedAlignment %>% purrr::map_chr(checkAlignment,goodEvil) %>%
factor(levels = c('E','N','G')),
lawful = processedAlignment %>%
purrr::map_chr(checkAlignment,lawfulChaotic) %>% factor(levels = c('C','N','L')))
charTable %<>% mutate(processedRace = race %>% sapply(function(x){
out = races %>% sapply(function(y){
grepl(pattern = y, x,perl = TRUE,ignore.case = TRUE)
}) %>% which %>% names
if(length(out) == 0 | length(out)>1){
out = ''
}
return(out)
}))
# lists any race text I'm not processing
charTable$processedRace[charTable$processedRace == ""] %>% names %>% table %>% sort
# process spells -----
spells = wizaRd::spells
spells = c(spells, list('.' = list(level = as.integer(99))))
class(spells) = 'list'
legitSpells =spells %>% names
# Remove every punctuation character from each element of `char`
# (vectorised; runs of punctuation collapse to nothing).
trimPunct = function(char){
  gsub(pattern = '[[:punct:]]+', replacement = '', x = char)
}
processedSpells = charTable$spells %>% sapply(function(x){
if(x==''){
return('')
}
spellNames = x %>% str_split('\\|') %>% {.[[1]]} %>% str_split('\\*') %>% map_chr(1)
spellLevels = x %>% str_split('\\|') %>% {.[[1]]} %>% str_split('\\*') %>% map_chr(2)
distanceMatrix = adist(tolower(spellNames), tolower(legitSpells),costs = list(ins=4, del=4, sub=6), counts = TRUE)
rownames(distanceMatrix) = spellNames
colnames(distanceMatrix) = legitSpells
predictedSpell = distanceMatrix %>% apply(1,which.min) %>% {legitSpells[.]}
distanceScores = distanceMatrix %>% apply(1,min)
predictedSpellLevel = spells[predictedSpell] %>% purrr::map_int('level')
ins = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'ins'] %>% as.matrix %>% diag
del = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'del'] %>% as.matrix %>% diag
sub = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'sub'] %>% as.matrix %>% diag
# check if all words of the prediction is in the written spell
isItIn = predictedSpell %>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(spellNames[i])),fixed = TRUE))
})}
# check if all words of the spell is in the prediction
isTheSpellIn = spellNames%>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(predictedSpell[i])), fixed = TRUE))
})}
spellFrame = data.frame(spellNames,predictedSpell,spellLevels,predictedSpellLevel,distanceScores,ins,del,sub,isItIn,isTheSpellIn,stringsAsFactors = FALSE)
# special cases for some badly matched spells
if(any(trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$predictedSpell = "Bigby's Hand"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$predictedSpellLevel = 5
}
if(any(trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$predictedSpell = "Melf's Acid Arrow"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$predictedSpellLevel = 2
}
if(any(trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$predictedSpell = "Tasha's Hideous Laughter"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$predictedSpellLevel = 1
}
# remove matches that don't satisfy the similarity criteria
spellFrame$predictedSpell[!(as.integer(spellFrame$spellLevels)==spellFrame$predictedSpellLevel &(spellFrame$isTheSpellIn | spellFrame$isItIn | (spellFrame$sub < 10 & spellFrame$del < 10 & spellFrame$ins < 10)))] = ''
spellFrame$predictedSpellLevel[!(as.integer(spellLevels)==predictedSpellLevel &(isTheSpellIn | isItIn | (sub < 10 & del < 10 & ins < 10)))] = ''
# spellFrame %<>% filter(as.integer(spellLevels)==predictedSpellLevel &(isTheSpellIn | isItIn | (sub < 5 & del < 5 & ins < 5)))
paste0(spellFrame$predictedSpell,'*',spellFrame$predictedSpellLevel,collapse ='|')
})
charTable$processedSpells = processedSpells
# manual checking of randomly selected data. select random spell/processed spell pairs. manually examine them to make sure
# they are allright and estimate accuracy.
withSpells = which(charTable$spells !='')
withSpells %>% lapply(function(i){
rawSpells = charTable$spells[i] %>% strsplit('\\|') %>% {.[[1]]}
pSpells = charTable$processedSpells[i] %>% strsplit('\\|') %>% {.[[1]]}
seq_along(rawSpells) %>% sapply(function(j){
c(i,rawSpells[j],pSpells[j])
}) %>% t
}) %>% do.call(rbind,.) -> spellProcessedPairs
# 200 random pairs
# spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],][sample(1:nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],]),200),]
# all spells that couldn't be matched
# spellProcessedPairs[spellProcessedPairs[,3] =='*',-3] %>% View
spellCount = spellProcessedPairs %>% nrow
standardSpellCount = nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] == spellProcessedPairs[,3],])
nonStandardSpellCount = nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],])
mismatchCount = spellProcessedPairs[spellProcessedPairs[,3] =='*',-3] %>% nrow
nonStandardSpellCount/spellCount * 100
mismatchCount/spellCount * 100
standardSpellCount/spellCount * 100
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[43]}
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[70]}
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[88]}
# download.file('https://www.dropbox.com/s/4f7zdx09nkfa9as/Core.xml?dl=1',destfile = 'Core.xml')
# allRules = xmlParse('Core.xml') %>% xmlToList()
# fightClubItems = allRules[names(allRules) == 'item']
# saveRDS(fightClubItems,'fightClubItems.rds')
# fightClubItems = readRDS('fightClubItems.rds')
# names(fightClubItems) = allRules %>% map('name') %>% as.character
#
# fightClubItems %>% map_chr('type') %>% {. %in% 'M'} %>% {fightClubItems[.]} %>% map_chr('name')
# fightClubItems %>% map_chr('type') %>% {. %in% 'R'} %>% {fightClubItems[.]} %>% map_chr('name')
legitWeapons = c(# fightClubItems %>% map_chr('type') %>% {. %in% 'M'} %>% {fightClubItems[.]} %>% map_chr('name'),
# fightClubItems %>% map_chr('type') %>% {. %in% 'R'} %>% {fightClubItems[.]} %>% map_chr('name'),
'Crossbow, Light', 'Dart', 'Shortbow', 'Sling',
'Blowgun', 'Crossbow, hand', 'Crossbow, Heavy', 'Longbow', 'Net',
'Club','Dagger','Greatclub','Handaxe','Javelin','Light hammer','Mace','Quarterstaff','Sickle','Spear','Unarmed Strike',
'Battleaxe','Flail','Glaive','Greataxe','Greatsword','Halberd','Lance','Longsword','Maul','Morningstar','Pike','Rapier','Scimitar','Shortsword','Trident','War pick','Warhammer','Whip')
# Fuzzy-match each character's free-text weapon entries against the canonical
# weapon list in `legitWeapons`. Input is a pipe-separated string per
# character; output is a pipe-separated string of matched canonical names,
# with '' in slots where no confident match was found.
processedWeapons = charTable$weapons %>% sapply(function(x){
    if(x==''){
        return('')
    }
    # split the pipe-separated weapon field into individual entries
    weaponNames = x %>% str_split('\\|') %>% {.[[1]]}
    # generalized edit distance to every canonical weapon; substitutions cost
    # more than insertions/deletions, and counts=TRUE keeps per-operation tallies
    distanceMatrix = adist(tolower(weaponNames), tolower(legitWeapons),costs = list(ins=2, del=2, sub=3), counts = TRUE)
    rownames(distanceMatrix) = weaponNames
    colnames(distanceMatrix) = legitWeapons
    # best canonical candidate per raw entry, its distance, and the
    # insertion/deletion/substitution counts of that best alignment
    predictedWeapon = distanceMatrix %>% apply(1,which.min) %>% {legitWeapons[.]}
    distanceScores = distanceMatrix %>% apply(1,min)
    ins = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'ins'] %>% as.matrix %>% diag
    del = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'del'] %>% as.matrix %>% diag
    sub = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'sub'] %>% as.matrix %>% diag
    # TRUE when every non-stopword word of the prediction occurs in the raw entry
    isItIn = predictedWeapon %>% str_split(' |/') %>% map(function(x){
        x[!x %in% c('and','or','of','to','the')]
    }) %>%
        {sapply(1:length(.),function(i){
            all(sapply(trimPunct(.[[i]]),grepl,x =trimPunct(weaponNames[i]),ignore.case=TRUE))
        })}
    # TRUE when every non-stopword word of the raw entry occurs in the prediction
    isTheWeaponIn = weaponNames%>% str_split(' |/') %>% map(function(x){
        x[!x %in% c('and','or','of','to','the')]
    }) %>%
        {sapply(1:length(.),function(i){
            all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(predictedWeapon[i])), fixed = TRUE))
        })}
    weaponFrame = data.frame(weaponNames,predictedWeapon,distanceScores,ins,del,sub,isItIn,isTheWeaponIn,stringsAsFactors = FALSE)
    # special cases: 'hand crossbow'/'heavy crossbow' are written 'Crossbow, ...'
    # in the canonical list, so edit distance alone matches them poorly
    if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow')){
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow',]$predictedWeapon = 'Crossbow, hand'
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow',]$isItIn = TRUE
    }
    if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow')){
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow',]$predictedWeapon = 'Crossbow, Heavy'
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow',]$isItIn = TRUE
    }
    # entries that are pure punctuation/whitespace reduce to '' and get no match
    if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '')){
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '',]$predictedWeapon = ''
        weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '',]$isItIn = TRUE
    }
    # reject matches that fail both containment checks and have 2+ of any edit type
    weaponFrame$predictedWeapon[!(weaponFrame$isTheWeaponIn | weaponFrame$isItIn | (weaponFrame$sub < 2 & weaponFrame$del<2 & weaponFrame$ins<2))] = ''
    # weaponFrame %<>% filter(isItIn| (sub < 2 & del < 2 & ins < 2))
    paste0(weaponFrame$predictedWeapon,collapse ='|')
})
charTable$processedWeapons = processedWeapons
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$weapons[i],charTable$processedWeapons[i])}) %>% {.>20} %>% {charTable$weapons[.]} %>% {.[10]}
# Build a (character index, raw weapon, processed weapon) matrix so the
# quality of the fuzzy matching can be inspected and quantified.
withWeapons = which(charTable$weapons !='')
withWeapons %>% lapply(function(i){
    rawWeapons = charTable$weapons[i] %>% stringr::str_split('\\|') %>% {.[[1]]}
    pWeapons = charTable$processedWeapons[i] %>% stringr::str_split('\\|') %>% {.[[1]]}
    seq_along(rawWeapons) %>% sapply(function(j){
        c(i,rawWeapons[j],pWeapons[j])
    }) %>% t
}) %>% do.call(rbind,.) -> weaponProcessedPairs
# weaponProcessedPairs[weaponProcessedPairs[,2] != weaponProcessedPairs[,3] & weaponProcessedPairs[,3]!='',] %>% {.[sample(nrow(.),200),]} %>% View
# Summary counts: exact matches, fuzzy (non-identical) matches, and failures.
weaponCount = weaponProcessedPairs %>% nrow
standardWeaponCount = nrow(weaponProcessedPairs[weaponProcessedPairs[,2] == weaponProcessedPairs[,3],])
nonStandardWeaponCount = nrow(weaponProcessedPairs[weaponProcessedPairs[,2] != weaponProcessedPairs[,3] & weaponProcessedPairs[,3] !='',])
mismatchCount = weaponProcessedPairs[weaponProcessedPairs[,3] =='',] %>% nrow
# Percentages printed for interactive inspection; results are not stored.
nonStandardWeaponCount/weaponCount * 100
mismatchCount/weaponCount * 100
standardWeaponCount/weaponCount * 100
# user id ------
# userID = c()
# pb = txtProgressBar(min = 0, max = nrow(charTable), initial = 0)
#
# for(i in 1:nrow(charTable)){
# setTxtProgressBar(pb,i)
# for (id in unique(userID)){
# userChars = charTable[which(userID == id),]
# ip = charTable$ip[i] %>% {if(is.na(.) || . =='NULL' || .==''){return("NANA")}else{.}}
# finger = charTable$finger[i] %>% {if(is.na(.) || . =='NULL' ||. == ''){return("NANA")}else{.}}
# hash = charTable$hash[i] %>% {if(is.na(.) || . =='NULL' || . == ''){return("NANA")}else{.}}
#
# ipInUser = ip %in% userChars$ip
# fingerInUser = finger %in% userChars$finger
# hashInUser = hash %in% userChars$hash
# if(ipInUser | fingerInUser | hashInUser){
#
# userID = c(userID,id)
# break
# }
#
# }
#
# if(length(userID)!=i){
# userID = c(userID, max(c(userID,0))+1)
# }
# }
#
# charTable$userID = userID
#
#
# userID = c()
# pb = txtProgressBar(min = 0, max = nrow(charTable), initial = 0)
#
# for(i in 1:nrow(charTable)){
# setTxtProgressBar(pb,i)
# for (id in unique(userID)){
# userChars = charTable[which(userID == id),]
# ip = charTable$ip[i] %>% {if(is.na(.) || . =='NULL' || .==''){return("NANA")}else{.}}
# finger = charTable$finger[i] %>% {if(is.na(.) || . =='NULL' ||. == ''){return("NANA")}else{.}}
# hash = charTable$hash[i] %>% {if(is.na(.) || . =='NULL' || . == ''){return("NANA")}else{.}}
#
# ipInUser = ip %in% userChars$ip
# fingerInUser = finger %in% userChars$finger
# hashInUser = hash %in% userChars$hash
# if(fingerInUser | hashInUser){
#
# userID = c(userID,id)
# break
# }
#
# }
#
# if(length(userID)!=i){
# userID = c(userID, max(c(userID,0))+1)
# }
# }
#
# charTable$userIDNoIP = userID
# group levels at common feat acquisition points. sorry fighters and rogues
# Bin total level into a factor; `cut` intervals are right-closed, so e.g.
# level 3 falls in '1-3' and level 4 in '4-7'.
charTable %<>% mutate(levelGroup = cut(level,
                                       breaks = c(0,3,7,11,15,18,20),
                                       labels = c('1-3','4-7','8-11','12-15','16-18','19-20')))
# remove personal info -----------
# Anonymize a character vector by replacing each non-empty value with a
# suffix of its sha1 digest — the shortest suffix length (plus one safety
# character) at which the distinct inputs still map to distinct suffixes.
# Empty strings are left empty. NOTE(review): `which.max` returns the FIRST
# index attaining the maximum distinct count, i.e. the shortest sufficient
# suffix; "collusion" is presumably a typo for "collision".
shortestDigest = function(vector){
    digested = vector(mode = 'character',length = length(vector))
    # sha1 digests are 40 hex characters long
    digested[vector!=''] = vector[vector!=''] %>% map_chr(digest,'sha1')
    uniqueDigested = digested[digested!=''] %>% unique
    # smallest i whose length-(i+1) suffix already separates all unique digests
    collusionLimit = 1:40 %>% sapply(function(i){
        substr(uniqueDigested,40-i,40)%>% unique %>% length
    }) %>% which.max %>% {.+1}
    digested %<>% substr(40-collusionLimit,40)
    return(digested)
}
# Replace personally identifying fields with short digest suffixes.
charTable$name %<>% shortestDigest
charTable$ip %<>% shortestDigest
charTable$finger %<>% shortestDigest
# charTable %<>% select(-hash)
# unsecureFields = c('ip','finger','hash')
# charTable = charTable[!names(charTable) %in% unsecureFields]
# add friendly names ensure old names remain the same
# the hashes will actually change but their order of introduction shouldn't
# Fixed seed so the alias assigned to each (date-ordered) unique name is
# reproducible across pipeline re-runs.
set.seed(1)
uniqueNames = charTable %>% arrange(date) %$% name %>% unique
randomAlias = random_names(length(uniqueNames))
names(randomAlias) = uniqueNames
charTable %<>% mutate(alias = randomAlias[name])
# Snapshot of the full (non-deduplicated) table, written to disk.
dnd_chars_all = charTable
write_tsv(dnd_chars_all,path = here('data-raw/dnd_chars_all.tsv'))
# get unique table ----------------
# Heuristically deduplicate the character table.
#
# Strategy: keep the highest-level row per (name, class-combination); then
# drop multiclass rows that look like lower-level snapshots of another
# multiclass row with the same name and a superset of classes; finally drop
# single-class rows that look like earlier snapshots of a multiclass row.
#
# Returns a list with three data frames: `uniqueTable` (deduplicated),
# `singleClassed`, and `multiClassed`.
getUniqueTable = function(charTable){
    # remove obvious duplicates. same name and class assumed to be dups
    # race is not considered in case same person is experimenting with different
    # races
    charTable %<>% filter(name !='')
    # sort descending by level so duplicated() keeps the highest-level copy;
    # also drop impossible levels (> 20)
    uniqueTable = charTable %>% arrange(desc(level)) %>%
        filter(!duplicated(paste(name,justClass))) %>%
        filter(!level > 20)
    # detect non unique characters that multiclassed
    multiClassed = uniqueTable %>% filter(grepl('\\|',justClass))
    singleClassed = uniqueTable %>% filter(!grepl('\\|',justClass))
    multiClassDuplicates = multiClassed$name %>% duplicated %>% which
    # this is somewhat of a heuristic since it only looks at total level and classes chosen
    # but as both name and class combination is the same its probably some guy experimenting
    # with different character ideas.
    multiClassDuplicates %>% sapply(function(x){
        thedup = multiClassed[x,]
        matches = multiClassed[-x,] %>% filter(name == thedup$name)
        higherLevel = thedup$level < matches$level
        dupClass = strsplit(thedup$justClass,'\\|')[[1]]
        matchClass = strsplit(matches$justClass,'\\|')
        # does some same-name row carry all of this row's classes?
        matchClass %>% sapply(function(y){
            all(dupClass %in% y)
        }) -> classMatches
        # duplicate only if a higher-level row covers the same classes
        any(classMatches & higherLevel)
    }) -> isMultiClassDuplicate
    if(length(multiClassDuplicates[isMultiClassDuplicate])>0){
        multiClassed = multiClassed[-multiClassDuplicates[isMultiClassDuplicate],]
    }
    # single-class rows whose name also appears among multiclass rows may be
    # earlier snapshots of the same character before it multiclassed
    matchingNames = multiClassed$name[multiClassed$name %in% singleClassed$name] %>%
        unique
    singleCharDuplicates = which(singleClassed$name %in% matchingNames)
    singleCharDuplicates %>% sapply(function(x){
        char = singleClassed[x,]
        # print(char[['name']])
        multiChar = multiClassed %>%
            filter(name %in% char[['name']] & grepl(char[['justClass']],justClass))
        if(nrow(multiChar) == 0){
            return (FALSE)
        }
        # treat as duplicate when a multiclass row with the same name and
        # containing this class is at a higher total level
        isHigher = any(multiChar$level > char[['level']])
        if (nrow(multiChar)>1){
            # warning("multiple matches")
        }
        return(isHigher)
    }) -> isDuplicate
    if(length(singleCharDuplicates[isDuplicate])>0){
        singleClassed = singleClassed[-singleCharDuplicates[isDuplicate],]
    }
    uniqueTable = rbind(singleClassed,multiClassed)
    return(list(uniqueTable = uniqueTable,
                singleClassed = singleClassed,
                multiClassed = multiClassed))
}
# dnd_chars_all = read_tsv(here("data-raw/dnd_chars_all.tsv"),na = 'NA') # redundant
usethis::use_data(dnd_chars_all,overwrite = TRUE)
list[dnd_chars_unique,dnd_chars_singleclass,dnd_chars_multiclass] = getUniqueTable(dnd_chars_all)
write_tsv(dnd_chars_unique,path = here('data-raw/dnd_chars_unique.tsv'))
usethis::use_data(dnd_chars_unique,overwrite = TRUE)
usethis::use_data(dnd_chars_singleclass,overwrite = TRUE)
usethis::use_data(dnd_chars_multiclass,overwrite = TRUE)
table2list = function(charTable){
seq_len(nrow(charTable)) %>% lapply(function(i){
char = charTable[i,]
list(ip = char$ip,
finger = char$finger,
name = list(
hash = char$name,
alias = char$alias),
race = list(
race = char$race,
processedRace = char$processedRace
),
background = char$background,
date = char$date,
class = seq_len(strsplit(char$class,'\\|') %>% {.[[1]]} %>% length) %>%
lapply(function(j){
list(
class = char$justClass %>% strsplit('\\|') %>% {out = .[[1]][j];if(is.na(out)){return('')}else{return(out)}},
subclass = char$subclass %>% strsplit('\\|') %>% {out = .[[1]][j];if(is.na(out)){return('')}else{return(out)}},
level = char$class %>% strsplit('\\|') %>% {.[[1]][j]} %>% str_extract('[0-9]+') %>% as.integer()
)
}) %>% {names(.) = strsplit(char$justClass,'\\|') %>% {.[[1]]};.},
level = char$level,
levelGroup = char$levelGroup,
feats = char$feats %>% strsplit('\\|') %>% {.[[1]]},
HP = char$HP,
AC = char$AC,
attributes = list(Str = char$Str,
Dex = char$Dex,
Con = char$Con,
Int = char$Int,
Wis = char$Wis,
Cha = char$Cha),
alignment = list(
alignment = char$alignment,
processedAlignment = char$processedAlignment,
lawful = char$lawful,
good = char$good
),
skills = char$skills %>% strsplit('\\|') %>% {.[[1]]},
weapons = seq_along(strsplit(char$weapons,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
list(
weapon = char$weapons %>% strsplit('\\|') %>% {.[[1]][j]},
processedWeapon = char$processedWeapons %>% strsplit('\\|') %>% {.[[1]][j]}
)
}) %>% {names(.) = strsplit(char$weapons,'\\|') %>% {.[[1]]};.},
spells = seq_along(strsplit(char$spells,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
list(
spell = char$spells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][1]},
level = char$spells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][2]},
processedSpell = char$processedSpells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][1]}
)
}) %>% {names(.) = strsplit(char$spells,'\\|') %>% {.[[1]]};.},
castingStat = char$castingStat,
choices = seq_along(strsplit(char$choices,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
char$choices %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('/') %>% {.[[1]][2]} %>% strsplit('\\*') %>% {.[[1]]}
}) %>% {names(.) = char$choices %>% strsplit('\\|') %>% unlist %>% strsplit('/') %>% map_chr(1);.},
location = list(country = char$country %>% as.character,
countryCode = char$countryCode %>% as.character),
hash = char$hash
)
}) %>% {names(.) = paste(charTable$alias,charTable$class);.}
}
dnd_chars_unique_list = table2list(dnd_chars_unique)
dnd_chars_singleclass_list = table2list(dnd_chars_singleclass)
dnd_chars_multiclass_list = table2list(dnd_chars_multiclass)
dnd_chars_all_list = table2list(dnd_chars_all)
usethis::use_data(dnd_chars_unique_list,overwrite = TRUE)
usethis::use_data(dnd_chars_singleclass_list,overwrite = TRUE)
usethis::use_data(dnd_chars_multiclass_list,overwrite = TRUE)
usethis::use_data(dnd_chars_all_list,overwrite = TRUE)
dnd_chars_unique_list %>% jsonlite::toJSON(pretty = TRUE) %>% writeLines(here('data-raw/dnd_chars_unique.json'))
dnd_chars_all_list %>% jsonlite::toJSON(pretty = TRUE) %>% writeLines(here('data-raw/dnd_chars_all.json'))
| /data-raw/dataProcess.R | permissive | jspickering/dnddata | R | false | false | 30,973 | r | library(import5eChar) # github.com/oganm/import5eChar
library(purrr)
library(readr)
library(glue)
library(digest)
library(dplyr)
library(XML)
library(ogbox) # github.com/oganm/ogbox
library(wizaRd) # github.com/oganm/wizaRd
library(stringr)
library(memoise)
library(rgeolocate)
library(here)
library(data.table)
library(randomIDs) # add friendlier names. github.com/oganm/randomIDs
library(jsonlite)
usethis::use_data_raw()
# When this script is executed via Rscript, switch the working directory to
# the directory containing the script file. Does nothing in interactive
# sessions (no --file argument) or when the match is ambiguous.
set_file_wd = function(){
    args = commandArgs(trailingOnly = FALSE)
    fileArgs = args[grepl('--file', args)]
    scriptPath = gsub('--file=', '', fileArgs)
    # only act on a single, unambiguous script path
    if (length(scriptPath) == 1) {
        setwd(dirname(scriptPath))
    }
}
set_file_wd()
setwd(here())
# memoisation for quick access
# fc <- cache_filesystem("data-raw/memoiseCache")
# memoImportChar = memoise(importCharacter, cache = fc)
if(file.exists('memoImportChar.rds')){
memoImportChar = readRDS(here('memoImportChar.rds'))
} else {
memoImportChar = memoise(importCharacter)
saveRDS(memoImportChar,'memoImportChar.rds')
}
# get all char files saved everywhere. Yes I made a mess that I refused to fix...
charFiles = c(list.files('/srv/shiny-server/printSheetApp/chars/',full.names = TRUE),
list.files('/srv/shiny-server/interactiveSheet/chars/',full.names = TRUE),
list.files('/srv/shiny-server/chars',full.names = TRUE),
list.files('/srv/shiny-server/chars2', full.names = TRUE),
list.files('/srv/shiny-server/chars3', full.names = TRUE),
list.files('/srv/shiny-server/chars4', full.names = TRUE))
print('reading char files')
fileInfo = file.info(charFiles)
charFiles = charFiles[order(fileInfo$mtime)]
fileInfo = fileInfo[order(fileInfo$mtime),]
# use import5eChar to read the all of them
chars = charFiles %>% lapply(function(x){
memoImportChar(file = x)
})
saveRDS(memoImportChar,'memoImportChar.rds')
# get date information. dates before 2018-04-16 are not reliable
# get user fingerprint and IP
fileData = charFiles %>% basename %>% strsplit('_')
# add file and user info to the characters
print('constructing char table')
chars = lapply(1:length(chars),function(i){
char = chars[[i]]
char$date = fileInfo$mtime[i]
if(length(fileData[[i]]) == 1){
char$ip = 'NULL'
char$finger = 'NULL'
char$hash = fileData[[i]]
} else{
char$finger = fileData[[i]][1]
char$ip = fileData[[i]][2]
char$hash = fileData[[i]][3]
}
char
})
# setting the names to character name and class. this won't be exposed to others
names(chars) = chars %>% map_chr(function(x){
paste(x$Name,x$ClassField)
})
# create the table. it initially creates the table because that's what my original pipeline did... later I will convert the
# relevant bits into a list, making this a little silly.
charTable = chars %>% map(function(x){
data.frame(ip = x$ip,
finger = x$finger,
hash = x$hash,
name = x$Name,
race = x$Race,
background = x$Background,
date = x$date,
class = paste(x$classInfo[,1],x$classInfo[,3],collapse='|'),
justClass = x$classInfo[,'Class'] %>% paste(collapse ='|'),
subclass = x$classInfo[,'Archetype'] %>% paste(collapse ='|'),
classFreeText = x$ClassField,
level = x$classInfo[,'Level'] %>% as.integer() %>% sum,
feats = x$feats[x$feats !=''] %>% paste(collapse = '|'),
HP = x$currentHealth,
AC = AC(x),
Str = x$abilityScores['Str'],
Dex = x$abilityScores['Dex'],
Con = x$abilityScores['Con'],
Int = x$abilityScores['Int'],
Wis = x$abilityScores['Wis'],
Cha = x$abilityScores['Cha'],
alignment = x$Alignment,
skills = x$skillProf %>% which %>% names %>% paste(collapse = '|'),
weapons = x$weapons %>% map_chr('name') %>% gsub("\\|","",.) %>% paste(collapse = '|'),
spells = glue('{x$spells$name %>% gsub("\\\\*|\\\\|","",.)}*{x$spells$level}') %>% glue_collapse('|') %>% {if(length(.)!=1){return('')}else{return(.)}},
# day = x$date %>% format('%m %d %Y'),
castingStat = names(x$abilityMods[x$castingStatCode+1]),
choices = paste(gsub('\\||/|\\*','',names(x$classChoices)),
sapply(lapply(x$classChoices,gsub,pattern = '\\||/|\\*', replacement = ''),
paste,collapse = '*'),
sep = "/",collapse = '|'),
stringsAsFactors = FALSE)
}) %>% do.call(rbind,.)
# get rid of characters who start with the character generator but continue to level up by hand (unpaid users)
freeTextLevel = charTable$classFreeText %>% str_extract_all('[0-9]+') %>% lapply(as.integer) %>% sapply(sum)
charTable %<>% filter(!(level == 1 & freeTextLevel !=1)) %>%
filter(class!='')
charTable %<>% select(-classFreeText)
# remove multiple occurances of the same file
charTable %<>% arrange(desc(date)) %>% filter(!duplicated(hash))
if(file.exists('memoIPgeolocate.rds')){
memoIPgeolocate = readRDS(here('memoIPgeolocate.rds'))
} else {
memoIPgeolocate = memoise(ipapi::geolocate)
saveRDS(memoIPgeolocate,'memoIPgeolocate.rds')
}
ipLocations = charTable$ip %>%
lapply(memoIPgeolocate,.progress = FALSE) %>%
rbindlist(fill = TRUE)
saveRDS(memoIPgeolocate,here('memoIPgeolocate.rds'))
charTable$country = ipLocations$country
charTable$countryCode = ipLocations$countryCode
# some experimentation with user location.
# file <- system.file("extdata","GeoLite2-Country.mmdb", package = "rgeolocate")
# results <- maxmind(charTable$ip, file, c("continent_name", "country_code", "country_name"))
# post processing -----
# the way races are encoded in the app is a little silly. sub-races are
# not recorded separately. essentially race information is lost other
# than a text field after it's effects are applied during creation.
# The text field is also not too consistent. For instance if you are a
# variant half elf it'll simply say "Variant" but if you are a variant human
# it'll only say human
# here, I define regex that matches races.
# kind of an overkill as only few races actually required special care
races = c(Aarakocra = 'Aarakocra',
Aasimar = 'Aasimar',
Bugbear= 'Bugbear',
Dragonborn = 'Dragonborn',
Dwarf = 'Dwarf',
Elf = '(?<!Half-)Elf|Drow',
Firbolg = 'Firbolg',
Genasi= 'Genasi',
Gith = 'Geth|Githzerai',
Gnome = 'Gnome',
Goblin='^Goblin$',
Goliath = 'Goliath',
'Half-Elf' = '(^Half-Elf$)|(^Variant$)',
'Half-Orc' = 'Half-Orc',
Halfling = 'Halfling',
Hobgoblin = 'Hobgoblin$',
Human = 'Human|Variant Human',
Kenku = 'Kenku',
Kobold = 'Kobold',
Lizardfolk = 'Lizardfolk',
Orc = '(?<!Half-)Orc',
'Yaun-Ti' = 'Serpentblood|Yuan-Ti',
Tabaxi = 'Tabaxi',
Tiefling ='Tiefling|Lineage',
Triton = 'Triton',
Turtle = 'Turtle|Tortle',
Vedalken = 'Violetken|Vedalken',
Minotaur = 'Minotaur',
Centaur = 'Centaur',
Loxodon = 'Elephantine|Luxodon|Loxodon',
`Simic hybrid` = 'Animal Hybrid|Simic Hybrid',
Warforged = 'Warforged|Envoy|Juggernaut|Juggeenaut',
Changeling = 'Changeling',
Shifter = 'Shifter',
Kalashtar = 'Kalashtar',
Eladrin = 'Eladrin')
align = list(NG = c('ng',
'"good"',
'good',
'neuteral good',
'neitral good',
'neutral good',
'netual good',
'nuetral goodt',
'neutral/good',
'neutral-good',
'nuetral good',
'nutral good',
'n good',
'\U0001f937 neutral good',
'neutral goodsskkd',
'n/g',
'neutral good',
'n/b',
'nb',
'neutral bueno',
'n. good'),
CG = c('chaotic good',
'caotica buena',
'chaotic good.',
'caótico bueno',
'cb',
'cg',
'chacotic good',
'c/g',
'good chaotic'),
LG = c('lawful good',
'l/g',
'l-g',
'lg',
'lawfull good',
'lawful goodness',
'lawfully good',
'legal bueno',
'legal good',
'lb'),
NN = c('neutral',
'neutral neutral',
'netral',
'n',
'true neutral',
'tn',
'true-neutral',
'leal neutro',
'nuetral',
'neutral verdadero',
'neutro',
'true nuetral'),
CN = c('chaotic neutral',
'neutral caotico',
'caotic neutral',
'chaotic-neutral',
'c/n',
'caótico neutro',
'chaotic netural',
'chaotic',
'cn',
'chaotic nuetral',
'chatoic neutral',
'neutral chaotic',
'chaotic - neutral',
'chaotic neutrall',
'caotico neutral',
'caótico neutral',
"тру хаотик"),
LN = c('lawful neutral',
'lawfull neutral',
'legal neutral',
'lawful neitral',
'lawful',
'lawful/neutral',
'leal e neutro',
'lawful - neutral',
'ln',
'l/n',
'lawful neutral (good-ish)'),
NE = c('neutral evil','ne','n/e',
'neutral malvado',
'neutral maligno'),
LE = c('lawful evil',
'lawfuo evil',
'lawful evik',
'le',
'legal malvado',
'l/e'),
CE = c('ce',
'chaotic evil',
'caótico malvado',
'caotico maligno'
))
goodEvil = list(`E` = c('NE','LE','CE'),
`N` = c('LN','CN','NN'),
`G` = c('NG','LG','CG'))
lawfulChaotic = list(`C` = c('CN','CG','CE'),
`N` = c('NG','NE','NN'),
`L` = c('LG','LE','LN'))
# lists any alignment text I'm not processing
charTable$alignment %>% {.[!tolower(trimws(.)) %in% unlist(align)]} %>% table %>% sort %>% names %>% tolower %>% trimws
# Map a raw alignment string to its canonical key.
#
# `legend` is a named list whose elements are vectors of accepted spellings
# (e.g. the `align`, `goodEvil`, `lawfulChaotic` lists above). The input is
# lower-cased and whitespace-trimmed, then located inside `legend` via
# ogbox::findInList; returns the matching element's name, or '' when the
# value is not recognized.
checkAlignment = function(x,legend){
    normalized = tolower(trimws(x))
    hit = names(legend)[findInList(normalized, legend)]
    if (length(hit) > 0) {
        return(hit)
    }
    ''
}
charTable %<>% mutate(processedAlignment = alignment %>% purrr::map_chr(checkAlignment,align),
good = processedAlignment %>% purrr::map_chr(checkAlignment,goodEvil) %>%
factor(levels = c('E','N','G')),
lawful = processedAlignment %>%
purrr::map_chr(checkAlignment,lawfulChaotic) %>% factor(levels = c('C','N','L')))
charTable %<>% mutate(processedRace = race %>% sapply(function(x){
out = races %>% sapply(function(y){
grepl(pattern = y, x,perl = TRUE,ignore.case = TRUE)
}) %>% which %>% names
if(length(out) == 0 | length(out)>1){
out = ''
}
return(out)
}))
# lists any race text I'm not processing
charTable$processedRace[charTable$processedRace == ""] %>% names %>% table %>% sort
# process spells -----
spells = wizaRd::spells
spells = c(spells, list('.' = list(level = as.integer(99))))
class(spells) = 'list'
legitSpells =spells %>% names
# Remove every punctuation character (POSIX [[:punct:]] class, which
# includes '-', '_', ',', '.', etc.) from each element of a character
# vector. Vectorized; returns a vector of the same length.
trimPunct = function(char){
    stripped <- gsub('[[:punct:]]+','',char)
    stripped
}
processedSpells = charTable$spells %>% sapply(function(x){
if(x==''){
return('')
}
spellNames = x %>% str_split('\\|') %>% {.[[1]]} %>% str_split('\\*') %>% map_chr(1)
spellLevels = x %>% str_split('\\|') %>% {.[[1]]} %>% str_split('\\*') %>% map_chr(2)
distanceMatrix = adist(tolower(spellNames), tolower(legitSpells),costs = list(ins=4, del=4, sub=6), counts = TRUE)
rownames(distanceMatrix) = spellNames
colnames(distanceMatrix) = legitSpells
predictedSpell = distanceMatrix %>% apply(1,which.min) %>% {legitSpells[.]}
distanceScores = distanceMatrix %>% apply(1,min)
predictedSpellLevel = spells[predictedSpell] %>% purrr::map_int('level')
ins = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'ins'] %>% as.matrix %>% diag
del = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'del'] %>% as.matrix %>% diag
sub = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'sub'] %>% as.matrix %>% diag
# check if all words of the prediction is in the written spell
isItIn = predictedSpell %>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(spellNames[i])),fixed = TRUE))
})}
# check if all words of the spell is in the prediction
isTheSpellIn = spellNames%>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(predictedSpell[i])), fixed = TRUE))
})}
spellFrame = data.frame(spellNames,predictedSpell,spellLevels,predictedSpellLevel,distanceScores,ins,del,sub,isItIn,isTheSpellIn,stringsAsFactors = FALSE)
# special cases for some badly matched spells
if(any(trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$predictedSpell = "Bigby's Hand"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'arcane hand' & spellFrame$spellLevels==5,]$predictedSpellLevel = 5
}
if(any(trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$predictedSpell = "Melf's Acid Arrow"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'acid arrow' & spellFrame$spellLevels==2,]$predictedSpellLevel = 2
}
if(any(trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1)){
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$predictedSpell = "Tasha's Hideous Laughter"
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$isItIn = TRUE
spellFrame[trimws(tolower(spellFrame$spellNames)) == 'hideaous laughter' & spellFrame$spellLevels==1,]$predictedSpellLevel = 1
}
# remove matches that don't satisfy the similarity criteria
spellFrame$predictedSpell[!(as.integer(spellFrame$spellLevels)==spellFrame$predictedSpellLevel &(spellFrame$isTheSpellIn | spellFrame$isItIn | (spellFrame$sub < 10 & spellFrame$del < 10 & spellFrame$ins < 10)))] = ''
spellFrame$predictedSpellLevel[!(as.integer(spellLevels)==predictedSpellLevel &(isTheSpellIn | isItIn | (sub < 10 & del < 10 & ins < 10)))] = ''
# spellFrame %<>% filter(as.integer(spellLevels)==predictedSpellLevel &(isTheSpellIn | isItIn | (sub < 5 & del < 5 & ins < 5)))
paste0(spellFrame$predictedSpell,'*',spellFrame$predictedSpellLevel,collapse ='|')
})
charTable$processedSpells = processedSpells
# manual checking of randomly selected data. select random spell/processed spell pairs. manually examine them to make sure
# they are allright and estimate accuracy.
withSpells = which(charTable$spells !='')
withSpells %>% lapply(function(i){
rawSpells = charTable$spells[i] %>% strsplit('\\|') %>% {.[[1]]}
pSpells = charTable$processedSpells[i] %>% strsplit('\\|') %>% {.[[1]]}
seq_along(rawSpells) %>% sapply(function(j){
c(i,rawSpells[j],pSpells[j])
}) %>% t
}) %>% do.call(rbind,.) -> spellProcessedPairs
# 200 random pairs
# spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],][sample(1:nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],]),200),]
# all spells that couldn't be matched
# spellProcessedPairs[spellProcessedPairs[,3] =='*',-3] %>% View
spellCount = spellProcessedPairs %>% nrow
standardSpellCount = nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] == spellProcessedPairs[,3],])
nonStandardSpellCount = nrow(spellProcessedPairs[spellProcessedPairs[,3] !='*' & spellProcessedPairs[,2] != spellProcessedPairs[,3],])
mismatchCount = spellProcessedPairs[spellProcessedPairs[,3] =='*',-3] %>% nrow
nonStandardSpellCount/spellCount * 100
mismatchCount/spellCount * 100
standardSpellCount/spellCount * 100
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[43]}
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[70]}
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$spells[i],charTable$processedSpells[i])}) %>% {.>20} %>% {charTable$spells[.]} %>% {.[88]}
# download.file('https://www.dropbox.com/s/4f7zdx09nkfa9as/Core.xml?dl=1',destfile = 'Core.xml')
# allRules = xmlParse('Core.xml') %>% xmlToList()
# fightClubItems = allRules[names(allRules) == 'item']
# saveRDS(fightClubItems,'fightClubItems.rds')
# fightClubItems = readRDS('fightClubItems.rds')
# names(fightClubItems) = allRules %>% map('name') %>% as.character
#
# fightClubItems %>% map_chr('type') %>% {. %in% 'M'} %>% {fightClubItems[.]} %>% map_chr('name')
# fightClubItems %>% map_chr('type') %>% {. %in% 'R'} %>% {fightClubItems[.]} %>% map_chr('name')
legitWeapons = c(# fightClubItems %>% map_chr('type') %>% {. %in% 'M'} %>% {fightClubItems[.]} %>% map_chr('name'),
# fightClubItems %>% map_chr('type') %>% {. %in% 'R'} %>% {fightClubItems[.]} %>% map_chr('name'),
'Crossbow, Light', 'Dart', 'Shortbow', 'Sling',
'Blowgun', 'Crossbow, hand', 'Crossbow, Heavy', 'Longbow', 'Net',
'Club','Dagger','Greatclub','Handaxe','Javelin','Light hammer','Mace','Quarterstaff','Sickle','Spear','Unarmed Strike',
'Battleaxe','Flail','Glaive','Greataxe','Greatsword','Halberd','Lance','Longsword','Maul','Morningstar','Pike','Rapier','Scimitar','Shortsword','Trident','War pick','Warhammer','Whip')
processedWeapons = charTable$weapons %>% sapply(function(x){
if(x==''){
return('')
}
weaponNames = x %>% str_split('\\|') %>% {.[[1]]}
distanceMatrix = adist(tolower(weaponNames), tolower(legitWeapons),costs = list(ins=2, del=2, sub=3), counts = TRUE)
rownames(distanceMatrix) = weaponNames
colnames(distanceMatrix) = legitWeapons
predictedWeapon = distanceMatrix %>% apply(1,which.min) %>% {legitWeapons[.]}
distanceScores = distanceMatrix %>% apply(1,min)
ins = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'ins'] %>% as.matrix %>% diag
del = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'del'] %>% as.matrix %>% diag
sub = attributes(distanceMatrix)$counts[,distanceMatrix %>% apply(1,which.min),'sub'] %>% as.matrix %>% diag
isItIn = predictedWeapon %>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(.[[i]]),grepl,x =trimPunct(weaponNames[i]),ignore.case=TRUE))
})}
isTheWeaponIn = weaponNames%>% str_split(' |/') %>% map(function(x){
x[!x %in% c('and','or','of','to','the')]
}) %>%
{sapply(1:length(.),function(i){
all(sapply(trimPunct(tolower(.[[i]])),grepl,x =trimPunct(tolower(predictedWeapon[i])), fixed = TRUE))
})}
weaponFrame = data.frame(weaponNames,predictedWeapon,distanceScores,ins,del,sub,isItIn,isTheWeaponIn,stringsAsFactors = FALSE)
if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow')){
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow',]$predictedWeapon = 'Crossbow, hand'
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'hand crossbow',]$isItIn = TRUE
}
if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow')){
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow',]$predictedWeapon = 'Crossbow, Heavy'
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == 'heavy crossbow',]$isItIn = TRUE
}
if(any(trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '')){
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '',]$predictedWeapon = ''
weaponFrame[trimPunct(trimws(tolower(weaponFrame$weaponNames))) == '',]$isItIn = TRUE
}
weaponFrame$predictedWeapon[!(weaponFrame$isTheWeaponIn | weaponFrame$isItIn | (weaponFrame$sub < 2 & weaponFrame$del<2 & weaponFrame$ins<2))] = ''
# weaponFrame %<>% filter(isItIn| (sub < 2 & del < 2 & ins < 2))
paste0(weaponFrame$predictedWeapon,collapse ='|')
})
charTable$processedWeapons = processedWeapons
# x = 1:nrow(charTable) %>% sapply(function(i){adist(charTable$weapons[i],charTable$processedWeapons[i])}) %>% {.>20} %>% {charTable$weapons[.]} %>% {.[10]}
withWeapons = which(charTable$weapons !='')
withWeapons %>% lapply(function(i){
rawWeapons = charTable$weapons[i] %>% stringr::str_split('\\|') %>% {.[[1]]}
pWeapons = charTable$processedWeapons[i] %>% stringr::str_split('\\|') %>% {.[[1]]}
seq_along(rawWeapons) %>% sapply(function(j){
c(i,rawWeapons[j],pWeapons[j])
}) %>% t
}) %>% do.call(rbind,.) -> weaponProcessedPairs
# weaponProcessedPairs[weaponProcessedPairs[,2] != weaponProcessedPairs[,3] & weaponProcessedPairs[,3]!='',] %>% {.[sample(nrow(.),200),]} %>% View
weaponCount = weaponProcessedPairs %>% nrow
standardWeaponCount = nrow(weaponProcessedPairs[weaponProcessedPairs[,2] == weaponProcessedPairs[,3],])
nonStandardWeaponCount = nrow(weaponProcessedPairs[weaponProcessedPairs[,2] != weaponProcessedPairs[,3] & weaponProcessedPairs[,3] !='',])
mismatchCount = weaponProcessedPairs[weaponProcessedPairs[,3] =='',] %>% nrow
nonStandardWeaponCount/weaponCount * 100
mismatchCount/weaponCount * 100
standardWeaponCount/weaponCount * 100
# user id ------
# userID = c()
# pb = txtProgressBar(min = 0, max = nrow(charTable), initial = 0)
#
# for(i in 1:nrow(charTable)){
# setTxtProgressBar(pb,i)
# for (id in unique(userID)){
# userChars = charTable[which(userID == id),]
# ip = charTable$ip[i] %>% {if(is.na(.) || . =='NULL' || .==''){return("NANA")}else{.}}
# finger = charTable$finger[i] %>% {if(is.na(.) || . =='NULL' ||. == ''){return("NANA")}else{.}}
# hash = charTable$hash[i] %>% {if(is.na(.) || . =='NULL' || . == ''){return("NANA")}else{.}}
#
# ipInUser = ip %in% userChars$ip
# fingerInUser = finger %in% userChars$finger
# hashInUser = hash %in% userChars$hash
# if(ipInUser | fingerInUser | hashInUser){
#
# userID = c(userID,id)
# break
# }
#
# }
#
# if(length(userID)!=i){
# userID = c(userID, max(c(userID,0))+1)
# }
# }
#
# charTable$userID = userID
#
#
# userID = c()
# pb = txtProgressBar(min = 0, max = nrow(charTable), initial = 0)
#
# for(i in 1:nrow(charTable)){
# setTxtProgressBar(pb,i)
# for (id in unique(userID)){
# userChars = charTable[which(userID == id),]
# ip = charTable$ip[i] %>% {if(is.na(.) || . =='NULL' || .==''){return("NANA")}else{.}}
# finger = charTable$finger[i] %>% {if(is.na(.) || . =='NULL' ||. == ''){return("NANA")}else{.}}
# hash = charTable$hash[i] %>% {if(is.na(.) || . =='NULL' || . == ''){return("NANA")}else{.}}
#
# ipInUser = ip %in% userChars$ip
# fingerInUser = finger %in% userChars$finger
# hashInUser = hash %in% userChars$hash
# if(fingerInUser | hashInUser){
#
# userID = c(userID,id)
# break
# }
#
# }
#
# if(length(userID)!=i){
# userID = c(userID, max(c(userID,0))+1)
# }
# }
#
# charTable$userIDNoIP = userID
# group levels at common feat acquisition points. sorry fighters and rogues
charTable %<>% mutate(levelGroup = cut(level,
breaks = c(0,3,7,11,15,18,20),
labels = c('1-3','4-7','8-11','12-15','16-18','19-20')))
# remove personal info -----------
# Anonymize a character vector by replacing each non-empty value with a
# suffix of its sha1 digest — the shortest suffix length (plus one safety
# character) at which the distinct inputs still map to distinct suffixes.
# Empty strings are left empty. NOTE(review): `which.max` returns the FIRST
# index attaining the maximum distinct count, i.e. the shortest sufficient
# suffix; "collusion" is presumably a typo for "collision".
shortestDigest = function(vector){
    digested = vector(mode = 'character',length = length(vector))
    # sha1 digests are 40 hex characters long
    digested[vector!=''] = vector[vector!=''] %>% map_chr(digest,'sha1')
    uniqueDigested = digested[digested!=''] %>% unique
    # smallest i whose length-(i+1) suffix already separates all unique digests
    collusionLimit = 1:40 %>% sapply(function(i){
        substr(uniqueDigested,40-i,40)%>% unique %>% length
    }) %>% which.max %>% {.+1}
    digested %<>% substr(40-collusionLimit,40)
    return(digested)
}
charTable$name %<>% shortestDigest
charTable$ip %<>% shortestDigest
charTable$finger %<>% shortestDigest
# charTable %<>% select(-hash)
# unsecureFields = c('ip','finger','hash')
# charTable = charTable[!names(charTable) %in% unsecureFields]
# add friendly names ensure old names remain the same
# the hashes will actually change but their order of introduction shouldn't
set.seed(1)
uniqueNames = charTable %>% arrange(date) %$% name %>% unique
randomAlias = random_names(length(uniqueNames))
names(randomAlias) = uniqueNames
charTable %<>% mutate(alias = randomAlias[name])
dnd_chars_all = charTable
write_tsv(dnd_chars_all,path = here('data-raw/dnd_chars_all.tsv'))
# get unique table ----------------
# Deduplicate the character table: the same player re-submitting a character
# (same name + class combination) should only be counted once, keeping the
# highest-level submission. Returns a list with the combined unique table and
# the single-/multi-classed subsets.
getUniqueTable = function(charTable){
# remove obvious duplicates. same name and class assumed to be dups
# race is not considered in case same person is experimenting with different
# races
charTable %<>% filter(name !='')
# keep the highest-level copy of each name+class pair; levels above the
# D&D maximum of 20 are treated as invalid and dropped
uniqueTable = charTable %>% arrange(desc(level)) %>%
filter(!duplicated(paste(name,justClass))) %>%
filter(!level > 20)
# detect non unique characters that multiclassed
multiClassed = uniqueTable %>% filter(grepl('\\|',justClass))
singleClassed = uniqueTable %>% filter(!grepl('\\|',justClass))
multiClassDuplicates = multiClassed$name %>% duplicated %>% which
# this is somewhat of a heuristic since it only looks at total level and classes chosen
# but as both name and class combination is the same its probably some guy experimenting
# with different character ideas.
# A multiclassed row counts as a duplicate when another row with the same name
# exists whose class set contains all of this row's classes at a higher level.
multiClassDuplicates %>% sapply(function(x){
thedup = multiClassed[x,]
matches = multiClassed[-x,] %>% filter(name == thedup$name)
higherLevel = thedup$level < matches$level
dupClass = strsplit(thedup$justClass,'\\|')[[1]]
matchClass = strsplit(matches$justClass,'\\|')
matchClass %>% sapply(function(y){
all(dupClass %in% y)
}) -> classMatches
any(classMatches & higherLevel)
}) -> isMultiClassDuplicate
if(length(multiClassDuplicates[isMultiClassDuplicate])>0){
multiClassed = multiClassed[-multiClassDuplicates[isMultiClassDuplicate],]
}
# a single-classed row is dropped when a higher-level multiclassed row with
# the same name includes its class (presumably the same character, later on)
matchingNames = multiClassed$name[multiClassed$name %in% singleClassed$name] %>%
unique
singleCharDuplicates = which(singleClassed$name %in% matchingNames)
singleCharDuplicates %>% sapply(function(x){
char = singleClassed[x,]
# print(char[['name']])
multiChar = multiClassed %>%
filter(name %in% char[['name']] & grepl(char[['justClass']],justClass))
if(nrow(multiChar) == 0){
return (FALSE)
}
isHigher = any(multiChar$level > char[['level']])
if (nrow(multiChar)>1){
# warning("multiple matches")
}
return(isHigher)
}) -> isDuplicate
if(length(singleCharDuplicates[isDuplicate])>0){
singleClassed = singleClassed[-singleCharDuplicates[isDuplicate],]
}
uniqueTable = rbind(singleClassed,multiClassed)
return(list(uniqueTable = uniqueTable,
singleClassed = singleClassed,
multiClassed = multiClassed))
}
# dnd_chars_all = read_tsv(here("data-raw/dnd_chars_all.tsv"),na = 'NA') # redundant
# Save the full table as package data and derive the deduplicated variants.
# `list[...] = ` is a multi-assignment destructuring the returned list —
# presumably gsubfn-style syntax; TODO confirm which package provides it.
usethis::use_data(dnd_chars_all,overwrite = TRUE)
list[dnd_chars_unique,dnd_chars_singleclass,dnd_chars_multiclass] = getUniqueTable(dnd_chars_all)
write_tsv(dnd_chars_unique,path = here('data-raw/dnd_chars_unique.tsv'))
usethis::use_data(dnd_chars_unique,overwrite = TRUE)
usethis::use_data(dnd_chars_singleclass,overwrite = TRUE)
usethis::use_data(dnd_chars_multiclass,overwrite = TRUE)
# Convert the flat character table into a nested list, one element per row.
# Multi-valued columns are '|'-separated strings (with '*' as a secondary
# separator inside spells and choices); each is split back into sub-lists.
# The result is named "<alias> <class>" per character, suitable for JSON export.
table2list = function(charTable){
seq_len(nrow(charTable)) %>% lapply(function(i){
char = charTable[i,]
list(ip = char$ip,
finger = char$finger,
name = list(
hash = char$name,
alias = char$alias),
race = list(
race = char$race,
processedRace = char$processedRace
),
background = char$background,
date = char$date,
# one entry per class taken; `class` strings look like "<name> <level>",
# so the level is the first run of digits in each piece
class = seq_len(strsplit(char$class,'\\|') %>% {.[[1]]} %>% length) %>%
lapply(function(j){
list(
class = char$justClass %>% strsplit('\\|') %>% {out = .[[1]][j];if(is.na(out)){return('')}else{return(out)}},
subclass = char$subclass %>% strsplit('\\|') %>% {out = .[[1]][j];if(is.na(out)){return('')}else{return(out)}},
level = char$class %>% strsplit('\\|') %>% {.[[1]][j]} %>% str_extract('[0-9]+') %>% as.integer()
)
}) %>% {names(.) = strsplit(char$justClass,'\\|') %>% {.[[1]]};.},
level = char$level,
levelGroup = char$levelGroup,
feats = char$feats %>% strsplit('\\|') %>% {.[[1]]},
HP = char$HP,
AC = char$AC,
attributes = list(Str = char$Str,
Dex = char$Dex,
Con = char$Con,
Int = char$Int,
Wis = char$Wis,
Cha = char$Cha),
alignment = list(
alignment = char$alignment,
processedAlignment = char$processedAlignment,
lawful = char$lawful,
good = char$good
),
skills = char$skills %>% strsplit('\\|') %>% {.[[1]]},
# weapons: raw and processed name per '|'-separated entry
weapons = seq_along(strsplit(char$weapons,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
list(
weapon = char$weapons %>% strsplit('\\|') %>% {.[[1]][j]},
processedWeapon = char$processedWeapons %>% strsplit('\\|') %>% {.[[1]][j]}
)
}) %>% {names(.) = strsplit(char$weapons,'\\|') %>% {.[[1]]};.},
# spells: each entry is "<name>*<level>"
spells = seq_along(strsplit(char$spells,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
list(
spell = char$spells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][1]},
level = char$spells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][2]},
processedSpell = char$processedSpells %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('\\*') %>% {.[[1]][1]}
)
}) %>% {names(.) = strsplit(char$spells,'\\|') %>% {.[[1]]};.},
castingStat = char$castingStat,
# choices: entries of the form "<choiceName>/<opt1>*<opt2>*..."
choices = seq_along(strsplit(char$choices,'\\|') %>% {.[[1]]}) %>% lapply(function(j){
char$choices %>% strsplit('\\|') %>% {.[[1]][j]} %>% strsplit('/') %>% {.[[1]][2]} %>% strsplit('\\*') %>% {.[[1]]}
}) %>% {names(.) = char$choices %>% strsplit('\\|') %>% unlist %>% strsplit('/') %>% map_chr(1);.},
location = list(country = char$country %>% as.character,
countryCode = char$countryCode %>% as.character),
hash = char$hash
)
}) %>% {names(.) = paste(charTable$alias,charTable$class);.}
}
# Export list-shaped versions of each table, both as package data and as
# pretty-printed JSON under data-raw/.
dnd_chars_unique_list = table2list(dnd_chars_unique)
dnd_chars_singleclass_list = table2list(dnd_chars_singleclass)
dnd_chars_multiclass_list = table2list(dnd_chars_multiclass)
dnd_chars_all_list = table2list(dnd_chars_all)
usethis::use_data(dnd_chars_unique_list,overwrite = TRUE)
usethis::use_data(dnd_chars_singleclass_list,overwrite = TRUE)
usethis::use_data(dnd_chars_multiclass_list,overwrite = TRUE)
usethis::use_data(dnd_chars_all_list,overwrite = TRUE)
dnd_chars_unique_list %>% jsonlite::toJSON(pretty = TRUE) %>% writeLines(here('data-raw/dnd_chars_unique.json'))
dnd_chars_all_list %>% jsonlite::toJSON(pretty = TRUE) %>% writeLines(here('data-raw/dnd_chars_all.json'))
|
library(ggplot2)
# Locate the script's own directory; parent.frame(2)$ofile is only populated
# when the script is run via source() — NOTE(review): this fails under Rscript.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
data <- read.csv('../salesdata/dataforC3Fig3.csv')
# cap the upper ribbon bound so extreme ratios don't blow up the y scale
data$ratiomax[data$ratiomax > 10] <- 10
# sales ratio over time, with a min/max ribbon around the central line
p <- ggplot(data, aes(x = year)) +
geom_line(aes(y = ratio), color = 'black') +
geom_ribbon(aes(ymin = ratiomin, ymax = ratiomax), fill = 'gray60', alpha = 0.4) +
theme_bw() +
labs(x = '', y = 'ratio') +
scale_y_continuous(limits = c(0.5, 3.02)) +
theme(text = element_text(size = 18, family = "Avenir Next Medium"), panel.border = element_blank()) +
theme(axis.line = element_line(color = 'black'),
legend.position = 'none')
# render a 400 dpi tiff for the book figure
tiff("~/Dropbox/book/chapter3/images/C3Fig3salesratio.tiff", height = 5, width = 9, units = 'in', res=400)
plot(p)
dev.off()
plot(p) | /chapter3/rplots/C3Fig3salesratio.R | permissive | ericayhayes/horizon | R | false | false | 752 | r | library(ggplot2)
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
data <- read.csv('../salesdata/dataforC3Fig3.csv')
data$ratiomax[data$ratiomax > 10] <- 10
p <- ggplot(data, aes(x = year)) +
geom_line(aes(y = ratio), color = 'black') +
geom_ribbon(aes(ymin = ratiomin, ymax = ratiomax), fill = 'gray60', alpha = 0.4) +
theme_bw() +
labs(x = '', y = 'ratio') +
scale_y_continuous(limits = c(0.5, 3.02)) +
theme(text = element_text(size = 18, family = "Avenir Next Medium"), panel.border = element_blank()) +
theme(axis.line = element_line(color = 'black'),
legend.position = 'none')
tiff("~/Dropbox/book/chapter3/images/C3Fig3salesratio.tiff", height = 5, width = 9, units = 'in', res=400)
plot(p)
dev.off()
plot(p) |
# Ten-fold cross-validation results on CIC-IDS2018 with an SVM classifier,
# one value per fold, for three undersampling strategies (random,
# ClusterCentroids, NearMiss). Each metric vector is rounded to 3 decimals.
#Results CIC2018 SVM Random
accRandomSVM <- c(0.811913095539641,0.810730651438045,0.811003523153798,0.810097404840306,0.810437055288184,0.811156653694706,0.811715062058166,0.810814700531927,0.810493471803255,0.809832592626706)
accRandomSVM = round(accRandomSVM, 3)
f1RandomSVM <- c(0.798843323063073,0.798074573895639,0.798316061114699,0.796920396258479,0.797259219957399,0.798009441656055,0.799148219429761,0.798044788764396,0.797626799075582,0.796320964365735)
f1RandomSVM = round(f1RandomSVM, 3)
precisionRandomSVM <- c(0.843955740487579,0.841576280155723,0.841687663458994,0.841585048368238,0.842164517191440,0.842624793687135,0.843243152603648,0.842739455930962,0.841686215634470,0.842369147349385)
precisionRandomSVM = round(precisionRandomSVM, 3)
recallRandomSVM <- c(0.811913095539641,0.810730651438045,0.811003523153798,0.810097404840306,0.810437055288184,0.811156653694706,0.811715062058166,0.810814700531927,0.810493471803255,0.809832592626706)
recallRandomSVM = round(recallRandomSVM, 3)
#Results CIC2018 SVM Cluster Centroids
accClusterCentroidsSVM <- c(0.969396918967462,0.971112441568609,0.971918391783913,0.970260437055288,0.971100927994105,0.970156814884749,0.971054873696087,0.970398599949340,0.969949570543671,0.971008819398070)
accClusterCentroidsSVM = round(accClusterCentroidsSVM, 3)
f1ClusterCentroidsSVM <- c(0.968158861081169,0.970021083232801,0.970906759969250,0.969009226403854,0.969905432946306,0.968993336167312,0.969967810420753,0.969149931817169,0.968715988837992,0.969869147345591)
f1ClusterCentroidsSVM = round(f1ClusterCentroidsSVM, 3)
precisionClusterCentroidsSVM <- c(0.969364809794403,0.971453645530839,0.972101503835049,0.970518348358196,0.971371617376496,0.970309070368199,0.971404683641797,0.970518357685004,0.970090472253306,0.971339297654209)
precisionClusterCentroidsSVM = round(precisionClusterCentroidsSVM, 3)
recallClusterCentroidsSVM <- c(0.969396918967462,0.971112441568609,0.971918391783913,0.970260437055288,0.971100927994105,0.970156814884749,0.971054873696087,0.970398599949340,0.969949570543671,0.971008819398070)
recallClusterCentroidsSVM = round(recallClusterCentroidsSVM, 3)
#Results CIC2018 SVM NearMiss (original comment wrongly said "Cluster Centroids")
accNearMissSVM <- c(0.84754875999,0.84850438667,0.84773297718,0.84802081654,0.84703064914,0.84825108803,0.84855044097,0.84864254957,0.84947152693,0.84756027356)
accNearMissSVM = round(accNearMissSVM, 3)
f1NearMissSVM <- c(0.81748106539,0.81853476190,0.81755434714,0.81811601981,0.81681559061,0.81834547690,0.81837495909,0.81889196520,0.81966871167,0.81741596578)
f1NearMissSVM = round(f1NearMissSVM, 3)
precisionNearMissSVM <- c(0.81636417077,0.81732065673,0.81634511748,0.81704596707,0.81563848877,0.81728593351,0.81684096460,0.81798970552,0.81832822611,0.81635244794)
precisionNearMissSVM = round(precisionNearMissSVM, 3)
recallNearMissSVM <- c(0.84754875999,0.84850438667,0.84773297718,0.84802081654,0.84703064914,0.84825108803,0.84855044097,0.84864254957,0.84947152693,0.84756027356)
recallNearMissSVM = round(recallNearMissSVM, 3)
# Computing the tests
# Paired one-sided Wilcoxon signed-rank tests (alternative = "l" = "less"):
# is the first sampler's metric distribution shifted below ClusterCentroids'?
# NOTE(review): each test is computed twice — the bare call (auto-printed only
# in interactive sessions) and the assignment used for the p-value print.
wilcox.test(accRandomSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMAccRC = wilcox.test(accRandomSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMAccRC$p.value)
wilcox.test(precisionRandomSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMPrecisionRC = wilcox.test(precisionRandomSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMPrecisionRC$p.value)
wilcox.test(f1RandomSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMF1RC = wilcox.test(f1RandomSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMF1RC$p.value)
wilcox.test(recallRandomSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMRecallRC = wilcox.test(recallRandomSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMRecallRC$p.value)
# same comparisons, NearMiss vs ClusterCentroids
wilcox.test(accNearMissSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMAccNC = wilcox.test(accNearMissSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMAccNC$p.value)
wilcox.test(precisionNearMissSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMPrecisionNC = wilcox.test(precisionNearMissSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMPrecisionNC$p.value)
wilcox.test(f1NearMissSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMF1NC = wilcox.test(f1NearMissSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMF1NC$p.value)
wilcox.test(recallNearMissSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMRecallNC = wilcox.test(recallNearMissSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMRecallNC$p.value)
################################
# Boxplot export ---------------------------------------------------------
# The four metric plots below differed only in the score vectors, the output
# file and the y-axis lower limit, so the shared plotting code is factored
# into a single helper. Output files and rendering are unchanged.

# Draw the three-sampler comparison boxplot for one metric and save it as pdf.
#
# file      : output pdf path
# random    : per-fold scores for random undersampling
# centroids : per-fold scores for ClusterCentroids
# nearmiss  : per-fold scores for NearMiss
# ymin      : lower bound of the y axis (upper bound fixed at 1)
save_metric_boxplot = function(file, random, centroids, nearmiss, ymin) {
  pdf(file = file)
  par(cex.lab = 1.5)  # is for y-axis
  par(cex.axis = 1.5) # is for x-axis
  par(mgp = c(2, 1, 0))
  par(mar = c(7, 5, 1, 1))
  # both `las` arguments are kept to reproduce the original call exactly
  boxplot(random, centroids, nearmiss,
          main = "",
          at = c(2, 4, 5),
          names = c("Aleatória", "Centroides", "NearMiss"),
          las = 2,
          col = c("red", "blue", "green"),
          border = "red",
          horizontal = FALSE,
          notch = FALSE,
          ylim = c(ymin, 1), yaxs = "i", cex.main = 1.5,
          cex.lab = 1.0,
          font.main = 20, las = 0)
  dev.off()
}
save_metric_boxplot("accSVMCIC2018.pdf", accRandomSVM, accClusterCentroidsSVM, accNearMissSVM, ymin = 0.8)
save_metric_boxplot("f1SVMCIC2018.pdf", f1RandomSVM, f1ClusterCentroidsSVM, f1NearMissSVM, ymin = 0.7)
save_metric_boxplot("precisionSVMCIC2018.pdf", precisionRandomSVM, precisionClusterCentroidsSVM, precisionNearMissSVM, ymin = 0.8)
save_metric_boxplot("recallSVMCIC2018.pdf", recallRandomSVM, recallClusterCentroidsSVM, recallNearMissSVM, ymin = 0.8)
| /Projeto/CICPlotsR/wilcoxonSVMBoxplotResultsCIC2018.R | no_license | Riccellisp/AprendizagemAutomatica | R | false | false | 7,830 | r | #Results CIC2018 SVM Random
accRandomSVM <- c(0.811913095539641,0.810730651438045,0.811003523153798,0.810097404840306,0.810437055288184,0.811156653694706,0.811715062058166,0.810814700531927,0.810493471803255,0.809832592626706)
accRandomSVM = round(accRandomSVM, 3)
f1RandomSVM <- c(0.798843323063073,0.798074573895639,0.798316061114699,0.796920396258479,0.797259219957399,0.798009441656055,0.799148219429761,0.798044788764396,0.797626799075582,0.796320964365735)
f1RandomSVM = round(f1RandomSVM, 3)
precisionRandomSVM <- c(0.843955740487579,0.841576280155723,0.841687663458994,0.841585048368238,0.842164517191440,0.842624793687135,0.843243152603648,0.842739455930962,0.841686215634470,0.842369147349385)
precisionRandomSVM = round(precisionRandomSVM, 3)
recallRandomSVM <- c(0.811913095539641,0.810730651438045,0.811003523153798,0.810097404840306,0.810437055288184,0.811156653694706,0.811715062058166,0.810814700531927,0.810493471803255,0.809832592626706)
recallRandomSVM = round(recallRandomSVM, 3)
#Results CIC2018 SVM Cluster Centroids
accClusterCentroidsSVM <- c(0.969396918967462,0.971112441568609,0.971918391783913,0.970260437055288,0.971100927994105,0.970156814884749,0.971054873696087,0.970398599949340,0.969949570543671,0.971008819398070)
accClusterCentroidsSVM = round(accClusterCentroidsSVM, 3)
f1ClusterCentroidsSVM <- c(0.968158861081169,0.970021083232801,0.970906759969250,0.969009226403854,0.969905432946306,0.968993336167312,0.969967810420753,0.969149931817169,0.968715988837992,0.969869147345591)
f1ClusterCentroidsSVM = round(f1ClusterCentroidsSVM, 3)
precisionClusterCentroidsSVM <- c(0.969364809794403,0.971453645530839,0.972101503835049,0.970518348358196,0.971371617376496,0.970309070368199,0.971404683641797,0.970518357685004,0.970090472253306,0.971339297654209)
precisionClusterCentroidsSVM = round(precisionClusterCentroidsSVM, 3)
recallClusterCentroidsSVM <- c(0.969396918967462,0.971112441568609,0.971918391783913,0.970260437055288,0.971100927994105,0.970156814884749,0.971054873696087,0.970398599949340,0.969949570543671,0.971008819398070)
recallClusterCentroidsSVM = round(recallClusterCentroidsSVM, 3)
#Results CIC2018 SVM Cluster Centroids
accNearMissSVM <- c(0.84754875999,0.84850438667,0.84773297718,0.84802081654,0.84703064914,0.84825108803,0.84855044097,0.84864254957,0.84947152693,0.84756027356)
accNearMissSVM = round(accNearMissSVM, 3)
f1NearMissSVM <- c(0.81748106539,0.81853476190,0.81755434714,0.81811601981,0.81681559061,0.81834547690,0.81837495909,0.81889196520,0.81966871167,0.81741596578)
f1NearMissSVM = round(f1NearMissSVM, 3)
precisionNearMissSVM <- c(0.81636417077,0.81732065673,0.81634511748,0.81704596707,0.81563848877,0.81728593351,0.81684096460,0.81798970552,0.81832822611,0.81635244794)
precisionNearMissSVM = round(precisionNearMissSVM, 3)
recallNearMissSVM <- c(0.84754875999,0.84850438667,0.84773297718,0.84802081654,0.84703064914,0.84825108803,0.84855044097,0.84864254957,0.84947152693,0.84756027356)
recallNearMissSVM = round(recallNearMissSVM, 3)
# Computando os testes
wilcox.test(accRandomSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMAccRC = wilcox.test(accRandomSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMAccRC$p.value)
wilcox.test(precisionRandomSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMPrecisionRC = wilcox.test(precisionRandomSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMPrecisionRC$p.value)
wilcox.test(f1RandomSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMF1RC = wilcox.test(f1RandomSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMF1RC$p.value)
wilcox.test(recallRandomSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMRecallRC = wilcox.test(recallRandomSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMRecallRC$p.value)
wilcox.test(accNearMissSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMAccNC = wilcox.test(accNearMissSVM, accClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMAccNC$p.value)
wilcox.test(precisionNearMissSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMPrecisionNC = wilcox.test(precisionNearMissSVM, precisionClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMPrecisionNC$p.value)
wilcox.test(f1NearMissSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMF1NC = wilcox.test(f1NearMissSVM, f1ClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMF1NC$p.value)
wilcox.test(recallNearMissSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
wilcoxonTestSVMRecallNC = wilcox.test(recallNearMissSVM, recallClusterCentroidsSVM, paired = TRUE,alternative = "l")
print(wilcoxonTestSVMRecallNC$p.value)
################################
pdf(file="accSVMCIC2018.pdf")
par(cex.lab=1.5) # is for y-axis
par(cex.axis=1.5) # is for x-axis
par(mgp=c(2,1,0))
par(mar=c(7,5,1,1))
boxplot(accRandomSVM, accClusterCentroidsSVM, accNearMissSVM,
main = "",
at = c(2,4,5),
names = c( "Aleatória", "Centroides", "NearMiss"),
las = 2,
col = c("red","blue","green"),
border = "red",
horizontal = FALSE,
notch = FALSE,
ylim = c(0.8,1 ), yaxs = "i" , cex.main=1.5,
cex.lab=1.0,
font.main=20,las=0)
dev.off()
pdf(file="f1SVMCIC2018.pdf")
par(cex.lab=1.5) # is for y-axis
par(cex.axis=1.5) # is for x-axis
par(mgp=c(2,1,0))
par(mar=c(7,5,1,1))
boxplot(f1RandomSVM, f1ClusterCentroidsSVM, f1NearMissSVM,
main = "",
at = c(2,4,5),
names = c( "Aleatória", "Centroides", "NearMiss"),
las = 2,
col = c("red","blue","green"),
border = "red",
horizontal = FALSE,
notch = FALSE,
ylim = c(0.7,1 ), yaxs = "i" , cex.main=1.5,
cex.lab=1.0,
font.main=20,las=0)
dev.off()
pdf(file="precisionSVMCIC2018.pdf")
par(cex.lab=1.5) # is for y-axis
par(cex.axis=1.5) # is for x-axis
par(mgp=c(2,1,0))
par(mar=c(7,5,1,1))
boxplot( precisionRandomSVM, precisionClusterCentroidsSVM, precisionNearMissSVM,
main = "",
at = c(2,4,5),
names = c( "Aleatória", "Centroides", "NearMiss"),
las = 2,
col = c("red","blue","green"),
border = "red",
horizontal = FALSE,
notch = FALSE,
ylim = c(0.8,1 ), yaxs = "i" , cex.main=1.5,
cex.lab=1.0,
font.main=20,las=0)
dev.off()
pdf(file="recallSVMCIC2018.pdf")
par(cex.lab=1.5) # is for y-axis
par(cex.axis=1.5) # is for x-axis
par(mgp=c(2,1,0))
par(mar=c(7,5,1,1))
boxplot( recallRandomSVM, recallClusterCentroidsSVM, recallNearMissSVM,
main = "",
at = c(2,4,5),
names = c( "Aleatória", "Centroides", "NearMiss"),
las = 2,
col = c("red","blue","green"),
border = "red",
horizontal = FALSE,
notch = FALSE,
ylim = c(0.8,1 ), yaxs = "i" , cex.main=1.5,
cex.lab=1.0,
font.main=20,las=0)
dev.off()
|
# Trajectory linearisation ------------------------------------------------

#' Linearise a trajectory
#'
#' Projects every milestone edge of a trajectory onto a single axis, placing
#' the edges one after another (optionally separated by a margin) and
#' computing a 1-d position for every milestone, edge and cell.
#'
#' @param trajectory The trajectory
#' @param margin The margin to add, as a fraction of the total edge length
#' @param no_margin_between_linear Whether to remove the margin if a milestone does not split or converge
#' @param one_edge Whether to assign each cell to an edge only once. Only relevant in case of divergence regions
#' @param equal_cell_width Whether to give each cell an equal width. Useful when plotting heatmaps.
#'
#' @return A list with `milestone_positions`, `edge_positions` and
#'   `cell_positions` (each carrying `comp_1`/`comp_2` coordinates), plus the
#'   absolute `margin` that was used.
linearise_trajectory <- function(
  trajectory,
  margin = 0.05,
  no_margin_between_linear = TRUE,
  one_edge = TRUE,
  equal_cell_width = FALSE
) {
  # linearisation needs a deterministic edge order, which rooting provides
  if (!is_rooted_milestone_network(trajectory)) {
    trajectory <- trajectory %>% add_root()
  }
  milestone_network <- trajectory$milestone_network
  progressions <- trajectory$progressions

  # scalar condition: use short-circuit `||` rather than elementwise `|`
  if (one_edge || equal_cell_width) {
    progressions <- progressions_one_edge(progressions)
  }

  if (equal_cell_width) {
    # rank cells within each edge so each occupies an equal width
    progressions <- progressions %>%
      group_by(from, to) %>%
      mutate(percentage = (rank(percentage, ties.method = "random")-1)/n()) %>%
      ungroup()

    # edge length becomes its cell count (0 for edges without cells)
    milestone_network <- progressions %>%
      group_by(from, to) %>%
      summarise(length = n()) %>%
      right_join(milestone_network %>% select(-length), c("from", "to")) %>%
      mutate(length = ifelse(is.na(length), 0, length)) %>% # add length of edges with no cells
      ungroup()
  }

  # convert the relative margin into absolute units along the axis
  margin <- sum(milestone_network$length) * margin

  if (no_margin_between_linear) {
    # add margin only if froms not directly connected, or if to is a forking milestone, or if to is a converging milestone
    milestone_network$add_margin <- (milestone_network$to != lead(milestone_network$from, default = "")) |
      (table(milestone_network$from)[milestone_network$to] > 1) |
      (table(milestone_network$to)[milestone_network$to] > 1)
  } else {
    milestone_network$add_margin <- TRUE
  }
  milestone_network$n_margins <- c(0, cumsum(milestone_network$add_margin)[-nrow(milestone_network)])

  # cumulative start/end of every edge along the linear axis
  milestone_network <- milestone_network %>%
    mutate(
      cumstart = c(0, cumsum(length)[-n()]) + n_margins * margin,
      cumend = cumstart + length,
      edge_id = factor(seq_len(n()))
    )

  progressions <- progressions %>%
    left_join(milestone_network, by = c("from", "to")) %>%
    mutate(cumpercentage = cumstart + percentage * length)

  # (a stray `lst(milestone_network, progressions, margin)` whose result was
  # discarded has been removed here — it was leftover debug code)

  milestone_positions <- bind_rows(
    milestone_network %>% select(milestone_id = from, comp_1 = cumstart) %>% mutate(type = "start"),
    milestone_network %>% select(milestone_id = to, comp_1 = cumend) %>% mutate(type = "end")
  ) %>%
    mutate(comp_2 = 0)

  edge_positions <- milestone_network %>%
    select(from, to, comp_1_from = cumstart, comp_1_to = cumend) %>%
    mutate(comp_2_from = 0, comp_2_to = 0)

  cell_positions <- progressions %>%
    select(cell_id, comp_1 = cumpercentage) %>%
    mutate(comp_2 = 0)

  lst(
    milestone_positions,
    edge_positions,
    cell_positions,
    margin = margin
  )
}
# Keep, for every cell, only the progression on the edge where it has advanced
# furthest. This only changes anything in divergence regions, where a cell can
# carry progressions on several edges at once.
progressions_one_edge <- function(progressions) {
  ranked <- progressions %>%
    group_by(cell_id) %>%
    arrange(-percentage)
  ranked %>%
    slice(1) %>%
    ungroup()
}
# A milestone network can only be linearised straightforwardly once the
# trajectory has been rooted, i.e. once a `root_milestone_id` was recorded.
# Inspecting the edge order itself is not reliable — e.g.
# tribble(from = c("a", "b", "d", "c"), to = c("b", "c", "b", "d")) —
# so the recorded root id is used as the indicator instead.
is_rooted_milestone_network <- function(trajectory) {
  "root_milestone_id" %in% names(trajectory)
}
# Optimise order ----------------------------------------------------------
# Reorder the rows of a milestone network (keeping the first row fixed) so
# that edges sharing a milestone end up close together, using a genetic
# algorithm over permutations. The score penalises the squared distance
# between a row and the row its from/to milestone connects to.
#' @importFrom GA ga
optimize_order <- function(milestone_network) {
# the first state will be kept at the beginning
n <- nrow(milestone_network)
if (n > 3) {
# score a permutation of rows 2..n (lower distance between connected
# rows = higher, i.e. less negative, fitness; 0 is a perfect chain)
score_order <- function(ordered) {
from <- milestone_network$from[c(1, ordered+1)]
to <- milestone_network$to[c(1, ordered+1)]
-sum(
((match(from, to) - seq_len(n) + 1)^2) %>% sum(na.rm = TRUE),
((match(to, from) - seq_len(n) - 1)^2) %>% sum(na.rm = TRUE)
)
}
# permutation GA; stops early if a perfect score (maxFitness = 0) is found
result <- GA::ga(
type = "permutation",
score_order,
lower = 1,
upper = n - 1,
maxiter = 30*nrow(milestone_network),
popSize = 20,
maxFitness = 0,
elitism = 5
)
ordered <- result@solution[1, ]
milestone_network[c(1, ordered+1), ]
} else {
# 3 rows or fewer: nothing worth optimising
milestone_network
}
}
# will use the ordering of the first trajectory, to optimize the ordering of the second trajectory, maximizing the correlation between the two
# Returns traj's milestone network (from, to, length) reordered so that edges
# whose cells appear earliest along rel_dataset's linearisation come first.
map_order <- function(traj, rel_dataset) {
# first get the cell cumulative percentage of the relative traj
margin <- 0
milestone_network <- rel_dataset$milestone_network %>%
mutate(
cumstart = c(0, cumsum(length)[-length(length)]) + margin * (seq_len(n())-1),
cumend = c(cumsum(length)) + margin * (seq_len(n())-1)
)
prog <- rel_dataset$progression %>% left_join(milestone_network, by = c("from", "to")) %>% mutate(cumpercentage = percentage*length + cumstart)
# use these cumulative percentages to find the optimal ordering of the traj of interest, by calculating the mean relative cumulative percentage, and then ordering the milestone_network along this measure
milestone_network_ordered <- traj$progressions %>%
left_join(
prog %>%
group_by(cell_id) %>%
summarise(mincumpercentage = min(cumpercentage)),
by = "cell_id") %>%
group_by(from, to) %>%
summarise(mean_mincumpercentage = mean(mincumpercentage))
# add missing milestone edges (without any cells)
# edges with no cells get Inf so they sort to the end
milestone_network_ordered <- milestone_network_ordered %>%
right_join(traj$milestone_network, by = c("from", "to")) %>%
mutate(mean_mincumpercentage = ifelse(is.na(mean_mincumpercentage), Inf, mean_mincumpercentage))
milestone_network_ordered %>% arrange(mean_mincumpercentage) %>% select(from, to, length) %>% ungroup()
}
# Connections between milestones ------------------------------------------
# Compute the polyline segments linking repeated occurrences of the same
# milestone along the linearised axis. "Direct" connections (exactly one
# margin apart) stay on the axis (level 0); all others are routed above it,
# with overlapping connections stacked on increasing levels.
calculate_connections <- function(linearised) {
# get all connections that are necessary
# direct connections are those that are reachable without up
connections <- crossing(
linearised$edge_positions %>% select(from, comp_1_from),
linearised$edge_positions %>% select(to, comp_1_to)
) %>% filter(
from == to,
comp_1_from != comp_1_to
) %>% mutate(
comp_1_diff = abs(comp_1_to-comp_1_from)
) %>%
arrange(comp_1_diff) %>%
select(milestone_id = from, comp_1_from, comp_1_to, comp_1_diff) %>%
mutate(
level = NA,
direct = near(comp_1_diff, linearised$margin)
)
# assign stacking levels greedily, shortest connections first; a connection
# that horizontally overlaps an already-placed one goes one level higher
for (i in seq_len(nrow(connections))) {
connection <- connections %>% extract_row_to_list(i)
overlapping_connections <- connections %>%
filter(
dplyr::row_number() < i,
pmax(comp_1_from, comp_1_to) > min(connection$comp_1_from, connection$comp_1_to),
pmin(comp_1_from, comp_1_to) < max(connection$comp_1_from, connection$comp_1_to)
)
if (nrow(overlapping_connections)) {
connections$level[i] <- max(overlapping_connections$level) + 1
} else {
if (connections$direct[i]) {
connections$level[i] <- 0
} else {
connections$level[i] <- 1
}
}
}
# calculate connection positions
# direct: one flat segment; indirect: three segments (up, across, down)
connections_direct <- connections %>% filter(direct)
connections_indirect <- connections %>% filter(!direct)
connection_positions <- bind_rows(
connections_direct %>% mutate(connection_ix = 1, comp_2_from = 0, comp_2_to = 0),
connections_indirect %>% mutate(comp_1_to = comp_1_from, comp_2_from = 0, comp_2_to = level, connection_ix = 1),
connections_indirect %>% mutate(comp_2_from = level, comp_2_to = level, connection_ix = 2),
connections_indirect %>% mutate(comp_1_from = comp_1_to, comp_2_from = level, comp_2_to = 0, connection_ix = 3)
)
connection_positions
}
| /R/helper_linearise_cells.R | no_license | dynverse/dynplot2 | R | false | false | 8,345 | r | # Trajectory linearisation ------------------------------------------------
#' Linearise a trajectory
#'
#' @param trajectory The trajectory
#' @param margin The margin to add
#' @param no_margin_between_linear Whether to remove the margin if a milestone does not split or converge
#' @param one_edge Whether to assign each cell to an edge only once. Only relevant in case of divergence regions
#' @param equal_cell_width Whether to give each cell an equal width. Useful when plotting heatmaps.
linearise_trajectory <- function(
trajectory,
margin = 0.05,
no_margin_between_linear = TRUE,
one_edge = TRUE,
equal_cell_width = FALSE
) {
if (!is_rooted_milestone_network(trajectory)) {
trajectory <- trajectory %>% add_root()
}
milestone_network <- trajectory$milestone_network
progressions <- trajectory$progressions
if (one_edge | equal_cell_width) {
progressions <- progressions_one_edge(progressions)
}
if (equal_cell_width) {
progressions <- progressions %>%
group_by(from, to) %>%
mutate(percentage = (rank(percentage, ties.method = "random")-1)/n()) %>%
ungroup()
milestone_network <- progressions %>%
group_by(from, to) %>%
summarise(length = n()) %>%
right_join(milestone_network %>% select(-length), c("from", "to")) %>%
mutate(length = ifelse(is.na(length), 0, length)) %>% # add length of edges with no cells
ungroup()
}
margin <- sum(milestone_network$length) * margin
if (no_margin_between_linear) {
# add margin only if froms not directly connected, or if to is a forking milestone, or if to is a converging milestone
milestone_network$add_margin <- (milestone_network$to != lead(milestone_network$from, default = "")) |
(table(milestone_network$from)[milestone_network$to] > 1) |
(table(milestone_network$to)[milestone_network$to] > 1)
} else {
milestone_network$add_margin <- TRUE
}
milestone_network$n_margins <- c(0, cumsum(milestone_network$add_margin)[-nrow(milestone_network)])
milestone_network <- milestone_network %>%
mutate(
cumstart = c(0, cumsum(length)[-n()]) + n_margins * margin,
cumend = cumstart + length,
edge_id = factor(seq_len(n()))
)
progressions <- progressions %>%
left_join(milestone_network, by = c("from", "to")) %>%
mutate(cumpercentage = cumstart + percentage * length)
lst(milestone_network, progressions, margin)
milestone_positions <- bind_rows(
milestone_network %>% select(milestone_id = from, comp_1 = cumstart) %>% mutate(type = "start"),
milestone_network %>% select(milestone_id = to, comp_1 = cumend) %>% mutate(type = "end")
) %>%
mutate(comp_2 = 0)
edge_positions <- milestone_network %>%
select(from, to, comp_1_from = cumstart, comp_1_to = cumend) %>%
mutate(comp_2_from = 0, comp_2_to = 0)
cell_positions <- progressions %>%
select(cell_id, comp_1 = cumpercentage) %>%
mutate(comp_2 = 0)
lst(
milestone_positions,
edge_positions,
cell_positions,
margin = margin
)
}
# Put every cell on the edge with the highest progression
# Only has an effect in case of divergence regions
progressions_one_edge <- function(progressions) {
progressions %>%
group_by(cell_id) %>%
arrange(-percentage) %>%
filter(dplyr::row_number() == 1) %>%
ungroup()
}
# Check whether the order of the edges of the milestone network
# is such that you can easily linearise the edges
is_rooted_milestone_network <- function(trajectory) {
# # does not work yet, e.g.:
# # tribble(from = c("a", "b", "d", "c"), to = c("b", "c", "b", "d"))
# tibble(
# milestone_id = unique(c(milestone_network$from, milestone_network$to)),
# from = match(milestone_id, milestone_network$from),
# to = match(milestone_id, milestone_network$to),
# ok = is.na(from) | is.na(to) | from >= to
# ) %>%
# pull(ok) %>%
# all()
"root_milestone_id" %in% names(trajectory)
}
# Optimise order ----------------------------------------------------------
#' @importFrom GA ga
# Optimise the row order of a milestone network so that edges sharing a
# milestone end up close together, which makes the network easy to
# linearise for plotting.
#
# The first edge is pinned in place; the remaining rows are permuted with
# a genetic algorithm (GA::ga, type = "permutation"). The fitness
# penalises, quadratically, how far each edge's `from` milestone lies from
# the edge that produces it as `to` (and vice versa); a perfect
# linearisation scores 0, which is also used as the early-stopping target.
# NOTE(review): GA is stochastic -- repeated calls may return different,
# equally scored orderings.
optimize_order <- function(milestone_network) {
  # the first state will be kept at the beginning
  n <- nrow(milestone_network)
  if (n > 3) {
    # Fitness of a permutation of rows 2..n (row 1 stays fixed); returns a
    # non-positive score, 0 meaning a perfectly consistent ordering.
    score_order <- function(ordered) {
      from <- milestone_network$from[c(1, ordered+1)]
      to <- milestone_network$to[c(1, ordered+1)]
      -sum(
        ((match(from, to) - seq_len(n) + 1)^2) %>% sum(na.rm = TRUE),
        ((match(to, from) - seq_len(n) - 1)^2) %>% sum(na.rm = TRUE)
      )
    }
    result <- GA::ga(
      type = "permutation",
      score_order,
      lower = 1,
      upper = n - 1,
      maxiter = 30*nrow(milestone_network),
      popSize = 20,
      maxFitness = 0,
      elitism = 5
    )
    # take one of the best orderings found and apply it, keeping row 1 first
    ordered <- result@solution[1, ]
    milestone_network[c(1, ordered+1), ]
  } else {
    # with three edges or fewer the given order is returned unchanged
    milestone_network
  }
}
# Reorder the milestone network of `traj` so that it follows the cell
# ordering of a reference trajectory (`rel_dataset`).
#
# Each cell of the reference is assigned a cumulative percentage along the
# linearised reference network; each edge of `traj` is then scored by the
# mean of the minimal cumulative percentages of its cells, and the edges
# are returned sorted by that score. Edges without any cells score Inf and
# are therefore placed last.
#
# NOTE(review): this reads `rel_dataset$progression` (singular) while the
# trajectory of interest uses `traj$progressions` -- confirm the reference
# object really stores its progressions under the singular name.
map_order <- function(traj, rel_dataset) {
  # first get the cell cumulative percentage of the relative traj
  # margin = 0: consecutive edges are laid end to end without a gap
  margin <- 0
  milestone_network <- rel_dataset$milestone_network %>%
    mutate(
      cumstart = c(0, cumsum(length)[-length(length)]) + margin * (seq_len(n())-1),
      cumend = c(cumsum(length)) + margin * (seq_len(n())-1)
    )
  # cumulative position of every cell = edge start + progression * length
  prog <- rel_dataset$progression %>% left_join(milestone_network, by = c("from", "to")) %>% mutate(cumpercentage = percentage*length + cumstart)
  # use these cumulative percentages to find the optimal ordering of the
  # traj of interest: score each edge by the mean (over its cells) of the
  # minimal cumulative percentage, then order the milestone_network by it
  milestone_network_ordered <- traj$progressions %>%
    left_join(
      prog %>%
        group_by(cell_id) %>%
        summarise(mincumpercentage = min(cumpercentage)),
      by = "cell_id") %>%
    group_by(from, to) %>%
    summarise(mean_mincumpercentage = mean(mincumpercentage))
  # add missing milestone edges (without any cells); they sort last (Inf)
  milestone_network_ordered <- milestone_network_ordered %>%
    right_join(traj$milestone_network, by = c("from", "to")) %>%
    mutate(mean_mincumpercentage = ifelse(is.na(mean_mincumpercentage), Inf, mean_mincumpercentage))
  milestone_network_ordered %>% arrange(mean_mincumpercentage) %>% select(from, to, length) %>% ungroup()
}
# Connections between milestones ------------------------------------------
# Compute the connecting segments that must be drawn between linearised
# edges whenever the same milestone ends one edge and starts another.
#
# `linearised` must contain `edge_positions` (from, to, comp_1_from,
# comp_1_to) and `margin`. Returns one row per drawn segment: direct
# connections (the two edge positions are exactly `margin` apart) are a
# single segment on the baseline; indirect connections are routed as three
# segments (up, across at their `level`, down).
calculate_connections <- function(linearised) {
  # get all connections that are necessary
  # direct connections are those that are reachable without going up
  connections <- crossing(
    linearised$edge_positions %>% select(from, comp_1_from),
    linearised$edge_positions %>% select(to, comp_1_to)
  ) %>% filter(
    from == to,
    comp_1_from != comp_1_to
  ) %>% mutate(
    comp_1_diff = abs(comp_1_to-comp_1_from)
  ) %>%
    arrange(comp_1_diff) %>%
    select(milestone_id = from, comp_1_from, comp_1_to, comp_1_diff) %>%
    mutate(
      level = NA,
      direct = near(comp_1_diff, linearised$margin)
    )
  # Assign a vertical "level" to every connection so that connections
  # overlapping in comp_1 are stacked above one another. Connections are
  # processed from shortest to longest (see arrange above); each sits one
  # level above the highest already-placed connection it overlaps with.
  for (i in seq_len(nrow(connections))) {
    connection <- connections %>% extract_row_to_list(i)
    overlapping_connections <- connections %>%
      filter(
        dplyr::row_number() < i,
        pmax(comp_1_from, comp_1_to) > min(connection$comp_1_from, connection$comp_1_to),
        pmin(comp_1_from, comp_1_to) < max(connection$comp_1_from, connection$comp_1_to)
      )
    if (nrow(overlapping_connections)) {
      connections$level[i] <- max(overlapping_connections$level) + 1
    } else {
      if (connections$direct[i]) {
        # adjacent edges connect directly on the baseline
        connections$level[i] <- 0
      } else {
        connections$level[i] <- 1
      }
    }
  }
  # calculate connection positions: direct = one horizontal segment at
  # comp_2 = 0; indirect = three segments routed over their level
  connections_direct <- connections %>% filter(direct)
  connections_indirect <- connections %>% filter(!direct)
  connection_positions <- bind_rows(
    connections_direct %>% mutate(connection_ix = 1, comp_2_from = 0, comp_2_to = 0),
    connections_indirect %>% mutate(comp_1_to = comp_1_from, comp_2_from = 0, comp_2_to = level, connection_ix = 1),
    connections_indirect %>% mutate(comp_2_from = level, comp_2_to = level, connection_ix = 2),
    connections_indirect %>% mutate(comp_1_from = comp_1_to, comp_2_from = level, comp_2_to = 0, connection_ix = 3)
  )
  connection_positions
}
|
\docType{package}
\name{ropensciToolkit}
\alias{package-ropensciToolkit}
\alias{ropensciToolkit}
\alias{ropensciToolkit-package}
\title{ropensciToolkit: A helper package for packages in the rOpenSci suite.}
\description{
\href{http://ropensci.org}{rOpenSci} is a project that
aims to develop R based tools to facilitate open science.
This helper package provides classes and methods that
apply across the board (see
\href{http://ropensci.org/packages}{full list of
supported packages}) and eventually become part of an
umbrella package named \strong{rOpenSci}
}
| /man/ropensciToolkit.Rd | no_license | imclab/ropensciToolkit | R | false | false | 581 | rd | \docType{package}
\name{ropensciToolkit}
\alias{package-ropensciToolkit}
\alias{ropensciToolkit}
\alias{ropensciToolkit-package}
\title{ropensciToolkit: A helper package for packages in the rOpenSci suite.}
\description{
\href{http://ropensci.org}{rOpenSci} is a project that
aims to develop R based tools to facilitate open science.
This helper package provides classes and methods that
apply to across the board (see
\href{http://ropensci.org/packages}{full list of
suppported packages}) and eventually become part of an
umbrella package named \strong{rOpenSci}
}
|
#' Classification of cell of origin
#'
#' Classification of DLBCL samples according to the ABC/GCB classifier.
#'
#' @rdname ABCGCB
#' @aliases
#' ABCGCB
#' ABCGCBClassifier
#' ABCGCBclassifier
#' @param new.data An expression matrix.
#' @param NC.range A \code{numeric} vector with values for which the
#' probabilities should be cut.
#' @return A \code{list} of probabilities and classes regarding each patients
#' association with the classes.
#' @details The function ABC/GCB classifies DLBCL patients according to the
#' cell of origin for the tumor.
#' @references Reference to the ABC/GCB and hemaClass paper.
#' @author
#' Steffen Falgreen <sfl (at) rn.dk> \cr
#' Anders Ellern Bilgrau <abilgrau (at) math.aau.dk>
#' @examples
#' \donttest{
#' files <- dir(system.file("extdata/celfiles", package = "hemaClass"),
#' full.names = TRUE)
#' affyBatch <- readCelfiles(filenames = files)
#'
#' # The cel files are pre-processed
#' affyRMA <- rmaPreprocessing(affyBatch)
#'
#' # The function rmaPreprocessing returns median centered and scaled
#' # expression values in the slot exprs.sc.
#'
#' # The slot exprs.sc.mean contains mean cetered and scaled expression values.
#' # This scaling can also be achieved using the function microarrayScale.
#' affyRMA.sc <- microarrayScale(affyRMA$exprs, center = "median")
#'
#' # We may now use the ABCGCB classifier
#' ABCGCB(affyRMA.sc)
#' }
#' @export
ABCGCB <- function(new.data, NC.range = c(0.1, 0.9)) {
  # Replace missing expression values by 0; assumes median-centred,
  # scaled input (see the roxygen examples above) -- 0 is then neutral.
  new.data[is.na(new.data)] <- 0
  prob <- ABCGCBProbFun(new.data)
  # Samples strictly below the lower cut are called GCB, strictly above
  # the upper cut ABC; everything in between stays non-classified (NC).
  lower <- NC.range[1]
  upper <- NC.range[2]
  calls <- rep("NC", length(prob))
  calls[prob < lower] <- "GCB"
  calls[prob > upper] <- "ABC"
  calls <- factor(calls, levels = c("ABC", "NC", "GCB"))
  list(prob = prob, class = calls)
}
# Compute the ABC probability for each sample in `newx`.
#
# Builds the design matrix expected by the logistic-regression
# coefficients returned by readABCGCBCoef() (first coefficient is the
# intercept, the remaining ones are named after probesets), zero-imputes
# probesets absent from `newx` (with a warning), and returns the
# inverse-logit of the linear predictor.
#
# newx: expression matrix with probesets in rows and samples in columns.
# Returns a one-column matrix of probabilities, rownames = sample names;
# values near 1 indicate ABC, near 0 indicate GCB (see ABCGCB above).
ABCGCBProbFun <- function(newx) {
  ABCGCB.coef <- readABCGCBCoef()
  # probesets required by the classifier (element 1 is the intercept)
  diff <- setdiff(row.names(ABCGCB.coef)[-1], rownames(newx))
  if (length(diff)) {
    # pad absent probesets with zero rows so the matrix product is defined
    missing <- matrix(0, ncol = ncol(newx), nrow = length(diff),
                      dimnames = list(diff, colnames(newx) ))
    newx <- rbind(newx, missing)
    warning("The following probesets are missing:\n",
            paste(diff, collapse = ", "))
  }
  # design matrix: row of 1s (intercept) on top, rows in coefficient order
  x <- rbind(1, newx[row.names(ABCGCB.coef)[-1], , drop = FALSE])
  prob.mat <- matrix(ncol = 1, nrow = ncol(x))
  rownames(prob.mat) <- colnames(x)
  prob.mat[,1] <- t(x) %*% ABCGCB.coef  # linear predictor per sample
  return(1 / (1 + exp(-prob.mat)))      # inverse logit -> probability
} | /R/ABCGCB.R | permissive | oncoclass/hemaClass | R | false | false | 2,570 | r | #' Classification of cell of origin
#'
#' Classification of DLBCL samples according to the ABC/GCB classifier.
#'
#' @rdname ABCGCB
#' @aliases
#' ABCGCB
#' ABCGCBClassifier
#' ABCGCBclassifier
#' @param new.data An expression matrix.
#' @param NC.range A \code{numeric} vector with values for which the
#' probabilities should be cut.
#' @return A \code{list} of probabilities and classes regarding each patients
#' association with the classes.
#' @details The function ABC/GCB classifies DLBCL patients according to the
#' cell of origin for the tumor.
#' @references Reference to the ABC/GCB and hemaClass paper.
#' @author
#' Steffen Falgreen <sfl (at) rn.dk> \cr
#' Anders Ellern Bilgrau <abilgrau (at) math.aau.dk>
#' @examples
#' \donttest{
#' files <- dir(system.file("extdata/celfiles", package = "hemaClass"),
#' full.names = TRUE)
#' affyBatch <- readCelfiles(filenames = files)
#'
#' # The cel files are pre-processed
#' affyRMA <- rmaPreprocessing(affyBatch)
#'
#' # The function rmaPreprocessing returns median centered and scaled
#' # expression values in the slot exprs.sc.
#'
#' # The slot exprs.sc.mean contains mean cetered and scaled expression values.
#' # This scaling can also be achieved using the function microarrayScale.
#' affyRMA.sc <- microarrayScale(affyRMA$exprs, center = "median")
#'
#' # We may now use the ABCGCB classifier
#' ABCGCB(affyRMA.sc)
#' }
#' @export
ABCGCB <- function(new.data, NC.range = c(0.1, 0.9)) {
  # Replace missing expression values by 0; assumes median-centred,
  # scaled input (see the roxygen examples above) -- 0 is then neutral.
  new.data[is.na(new.data)] <- 0
  prob <- ABCGCBProbFun(new.data)
  # Samples strictly below the lower cut are called GCB, strictly above
  # the upper cut ABC; everything in between stays non-classified (NC).
  lower <- NC.range[1]
  upper <- NC.range[2]
  calls <- rep("NC", length(prob))
  calls[prob < lower] <- "GCB"
  calls[prob > upper] <- "ABC"
  calls <- factor(calls, levels = c("ABC", "NC", "GCB"))
  list(prob = prob, class = calls)
}
# Compute the ABC probability for each sample in `newx`.
#
# Builds the design matrix expected by the logistic-regression
# coefficients returned by readABCGCBCoef() (first coefficient is the
# intercept, the remaining ones are named after probesets), zero-imputes
# probesets absent from `newx` (with a warning), and returns the
# inverse-logit of the linear predictor.
#
# newx: expression matrix with probesets in rows and samples in columns.
# Returns a one-column matrix of probabilities, rownames = sample names;
# values near 1 indicate ABC, near 0 indicate GCB (see ABCGCB above).
ABCGCBProbFun <- function(newx) {
  ABCGCB.coef <- readABCGCBCoef()
  # probesets required by the classifier (element 1 is the intercept)
  diff <- setdiff(row.names(ABCGCB.coef)[-1], rownames(newx))
  if (length(diff)) {
    # pad absent probesets with zero rows so the matrix product is defined
    missing <- matrix(0, ncol = ncol(newx), nrow = length(diff),
                      dimnames = list(diff, colnames(newx) ))
    newx <- rbind(newx, missing)
    warning("The following probesets are missing:\n",
            paste(diff, collapse = ", "))
  }
  # design matrix: row of 1s (intercept) on top, rows in coefficient order
  x <- rbind(1, newx[row.names(ABCGCB.coef)[-1], , drop = FALSE])
  prob.mat <- matrix(ncol = 1, nrow = ncol(x))
  rownames(prob.mat) <- colnames(x)
  prob.mat[,1] <- t(x) %*% ABCGCB.coef  # linear predictor per sample
  return(1 / (1 + exp(-prob.mat)))      # inverse logit -> probability
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jara_utils.R
\name{iucn_frame}
\alias{iucn_frame}
\title{iucn_frame}
\usage{
iucn_frame(
xylim = c(100, 1),
plot.cex = 1,
legend.cex = 0.9,
criteria = c("A2", "A1")[1],
iucn.cols = TRUE,
add = FALSE
)
}
\arguments{
\item{xylim}{determines upper x and y limits}
\item{plot.cex}{cex graphic option}
\item{legend.cex}{legend size cex graphic option}
\item{criteria}{option to choose between IUCN A1 or A2 thresholds (A2 is default)}
\item{iucn.cols}{to use iucn color recommendation or a brighter version if FALSE}
\item{add}{if TRUE par is not called to enable manual multiplots}
}
\description{
Empty iucn color plot
}
\author{
Henning Winker, Richard Sherley and Nathan Pacoureau
}
| /man/iucn_frame.Rd | no_license | Henning-Winker/JARA | R | false | true | 776 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jara_utils.R
\name{iucn_frame}
\alias{iucn_frame}
\title{iucn_frame}
\usage{
iucn_frame(
xylim = c(100, 1),
plot.cex = 1,
legend.cex = 0.9,
criteria = c("A2", "A1")[1],
iucn.cols = TRUE,
add = FALSE
)
}
\arguments{
\item{xylim}{determines upper x and y limits}
\item{plot.cex}{cex graphic option}
\item{legend.cex}{legend size cex graphic option}
\item{criteria}{option to choose between IUCN A1 or A2 thresholds (A2 is default)}
\item{iucn.cols}{to use iucn color recommendation or a brighter version if FALSE}
\item{add}{if TRUE par is not called to enable manual multiplots}
}
\description{
Empty iucn color plot
}
\author{
Henning Winker, Richard Sherley and Nathan Pacoureau
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.