content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## Estimate the Fisher information matrix (FIM) of a shifted inverse
## Gaussian distribution numerically (via per-observation Hessians of the
## log-density), then verify the resulting asymptotic standard errors by
## Monte Carlo simulation of ML estimates.
library(numDeriv)
library(statmod)

N <- 10000
mu <- 1
lambda <- 1.5
t0 <- 0.4                      # shift (threshold) parameter
parms <- c(mu, lambda, t0)

## Sample N observations from the shifted inverse Gaussian
t <- rinvgauss(N, mu, lambda) + t0

## Log-likelihood of the shifted inverse Gaussian.
## Fix: the original body was `sum(log())` -- log() called with no
## argument -- which errors on any call. Now mirrors minus_loglik below.
loglik <- function(t, parms){
  mu <- parms[1]
  lambda <- parms[2]
  t0 <- parms[3]
  sum(log(dinvgauss(t - t0, mu, lambda)))
}

## Numerical Hessian of the log-density of one observation w.r.t. parms
hess <- function(t, parms) hessian(func = function(t, parms) log(dinvgauss(t - parms[3], parms[1], parms[2])),
                                   x = parms, t = t)

## Average the per-observation Hessians; FIM = -E[Hessian of log-density]
res <- lapply(t, hess, parms = parms)
res2 <- matrix(0, nrow = 3, ncol = 3)
for(i in 1:N){
  res2 <- res2 + res[[i]]/N
}
FIM <- -res2
FIM
round(solve(FIM), 3)              # asymptotic covariance (per observation)
round(cov2cor(solve(FIM)), 3)     # parameter correlations

## Information for a sample of size 200 and implied standard errors
FIM_new <- FIM*200
sqrt(diag(round(solve(FIM_new), 3)))

# Verify this: compare the empirical SD of ML estimates over N_sim
# simulated datasets of size N_data with the asymptotic SEs from the FIM.
N_data <- 200
N_sim <- 10000
ML_estimates <- matrix(nrow = N_sim, ncol = length(parms))

## Negative log-likelihood (optim() minimises by default)
minus_loglik <- function(t, parms){
  mu <- parms[1]
  lambda <- parms[2]
  t0 <- parms[3]
  -sum(log(dinvgauss(t - t0, mu, lambda)))
}

for(i in 1:N_sim){
  t <- rinvgauss(N_data, mu, lambda) + t0
  # named argument t= binds the data; the positional par vector binds parms
  fit <- optim(parms, minus_loglik, t = t,
               control = list(maxit = 10000), method = "BFGS")
  ML_estimates[i,] <- fit$par
}
apply(ML_estimates, 2, sd)
sqrt(diag(solve(FIM))/N_data) # asymptotic standard error of ML estimator
|
/FIM_invgauss.R
|
no_license
|
JoonsukPark/examples
|
R
| false
| false
| 1,278
|
r
|
## Estimate the Fisher information matrix (FIM) of a shifted inverse
## Gaussian distribution numerically (via per-observation Hessians of the
## log-density), then verify the resulting asymptotic standard errors by
## Monte Carlo simulation of ML estimates.
library(numDeriv)
library(statmod)

N <- 10000
mu <- 1
lambda <- 1.5
t0 <- 0.4                      # shift (threshold) parameter
parms <- c(mu, lambda, t0)

## Sample N observations from the shifted inverse Gaussian
t <- rinvgauss(N, mu, lambda) + t0

## Log-likelihood of the shifted inverse Gaussian.
## Fix: the original body was `sum(log())` -- log() called with no
## argument -- which errors on any call. Now mirrors minus_loglik below.
loglik <- function(t, parms){
  mu <- parms[1]
  lambda <- parms[2]
  t0 <- parms[3]
  sum(log(dinvgauss(t - t0, mu, lambda)))
}

## Numerical Hessian of the log-density of one observation w.r.t. parms
hess <- function(t, parms) hessian(func = function(t, parms) log(dinvgauss(t - parms[3], parms[1], parms[2])),
                                   x = parms, t = t)

## Average the per-observation Hessians; FIM = -E[Hessian of log-density]
res <- lapply(t, hess, parms = parms)
res2 <- matrix(0, nrow = 3, ncol = 3)
for(i in 1:N){
  res2 <- res2 + res[[i]]/N
}
FIM <- -res2
FIM
round(solve(FIM), 3)              # asymptotic covariance (per observation)
round(cov2cor(solve(FIM)), 3)     # parameter correlations

## Information for a sample of size 200 and implied standard errors
FIM_new <- FIM*200
sqrt(diag(round(solve(FIM_new), 3)))

# Verify this: compare the empirical SD of ML estimates over N_sim
# simulated datasets of size N_data with the asymptotic SEs from the FIM.
N_data <- 200
N_sim <- 10000
ML_estimates <- matrix(nrow = N_sim, ncol = length(parms))

## Negative log-likelihood (optim() minimises by default)
minus_loglik <- function(t, parms){
  mu <- parms[1]
  lambda <- parms[2]
  t0 <- parms[3]
  -sum(log(dinvgauss(t - t0, mu, lambda)))
}

for(i in 1:N_sim){
  t <- rinvgauss(N_data, mu, lambda) + t0
  # named argument t= binds the data; the positional par vector binds parms
  fit <- optim(parms, minus_loglik, t = t,
               control = list(maxit = 10000), method = "BFGS")
  ML_estimates[i,] <- fit$par
}
apply(ML_estimates, 2, sd)
sqrt(diag(solve(FIM))/N_data) # asymptotic standard error of ML estimator
|
## The overal function takes a value for matrix and stores inverse of that matrix in the cache
## this function takes in a matrix and stores the inverse in the cache
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setinverse() stores a value
  cached_inv <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inv <<- NULL
  }

  # Return the stored matrix
  get <- function() x

  # Store a freshly computed inverse in the cache
  setinverse <- function(inverse) cached_inv <<- inverse

  # Return the cached inverse, or NULL if none has been stored yet
  getinverse <- function() cached_inv

  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function checks the cache if the is a data to use for matrix if not it creates one and stores it.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## The inverse is computed with solve() on the first call only; subsequent
## calls reuse the cached value (announced with a message).
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  # Cache hit: reuse the previously computed inverse
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute the inverse, store it, and return it
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
lseino/ProgrammingAssignment2
|
R
| false
| false
| 792
|
r
|
## The overal function takes a value for matrix and stores inverse of that matrix in the cache
## this function takes in a matrix and stores the inverse in the cache
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setinverse() stores a value
  cached_inv <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inv <<- NULL
  }

  # Return the stored matrix
  get <- function() x

  # Store a freshly computed inverse in the cache
  setinverse <- function(inverse) cached_inv <<- inverse

  # Return the cached inverse, or NULL if none has been stored yet
  getinverse <- function() cached_inv

  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function checks the cache if the is a data to use for matrix if not it creates one and stores it.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## The inverse is computed with solve() on the first call only; subsequent
## calls reuse the cached value (announced with a message).
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  # Cache hit: reuse the previously computed inverse
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute the inverse, store it, and return it
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
## Exploratory analysis of the Fiji earthquakes data (datasets::quakes).
## Fixes: skewness()/kurtosis() are not in base R -- the moments package
## provides them (and applies them column-wise to data frames); the
## original also called a nonexistent variance() function, replaced with
## base var().
library(moments)

e_quakes <- datasets::quakes
# perform everything + below
# skewness
# kurtosis
# var
# standard deviation
e_quakes

#### Top 10 rows and last 10 rows
head(e_quakes, 10)
tail(e_quakes, 10)

###### Columns
e_quakes[, c(1, 2)]
df <- e_quakes[, -6]   # NOTE(review): quakes has only 5 columns, so -6 drops nothing -- confirm intent
summary(e_quakes[, 1])
summary(e_quakes)
e_quakes$lat

########### Summary of the data #########
summary(e_quakes$lat)
summary(e_quakes)
summary(e_quakes$long)

#####################
plot(e_quakes$lat)
plot(e_quakes$lat, e_quakes$long, type = "p")
plot(e_quakes)

# points and lines
plot(e_quakes$lat, type = "b") # p: points, l: lines, b: both
plot(e_quakes$lat, ylab = 'lat',
     xlab = 'No of Instances', main = 'lat of earthquakes',
     col = 'blue')

# Horizontal bar plot
barplot(e_quakes$lat, main = 'lat of earthquakes',
        ylab = 'lat', col = 'blue', horiz = TRUE, axes = TRUE)

# Histogram
hist(e_quakes$lat)
hist(e_quakes$long,
     main = 'longitude of earthquake',
     xlab = 'long', col = 'blue')

# Single box plot
boxplot(e_quakes$lat, main = "Temp_Boxplot")
# Multiple box plots
boxplot(e_quakes, main = 'Multiple')

# margin of the grid (mar),
# no of rows and columns (mfrow),
# whether a border is to be included (bty)
# and position of the labels (las: 1 for horizontal, las: 0 for vertical)
# bty - box around the plot
par(mfrow = c(3, 3), mar = c(2, 5, 2, 1), las = 0, bty = "o")
plot(e_quakes$lat)
plot(e_quakes$lat, e_quakes$long)
plot(e_quakes$lat, type = "l")
plot(e_quakes$lat, type = "l")
plot(e_quakes$lat, type = "l")
barplot(e_quakes$lat, main = 'lat of earthquakes',
        xlab = 'lat', col = 'green', horiz = TRUE)
hist(e_quakes$lat)
boxplot(e_quakes$lat)
# was e_quakes[, 0:4]: index 0 is silently dropped in R, so 1:4 states the intent
boxplot(e_quakes[, 1:4], main = 'Multiple Box plots')

# skewness (column-wise via moments)
skewness(e_quakes)
# kurtosis (column-wise via moments)
kurtosis(e_quakes)
# variance of magnitude; the original called variance(mag, stations, v=1),
# which does not exist -- var(x, y) would give the mag/stations covariance
var(e_quakes$mag)
# standard deviation
sd(e_quakes$lat, na.rm = FALSE)
|
/Exercise.R
|
no_license
|
ajawati/Data-Science_R
|
R
| false
| false
| 1,893
|
r
|
## Exploratory analysis of the Fiji earthquakes data (datasets::quakes).
## Fixes: skewness()/kurtosis() are not in base R -- the moments package
## provides them (and applies them column-wise to data frames); the
## original also called a nonexistent variance() function, replaced with
## base var().
library(moments)

e_quakes <- datasets::quakes
# perform everything + below
# skewness
# kurtosis
# var
# standard deviation
e_quakes

#### Top 10 rows and last 10 rows
head(e_quakes, 10)
tail(e_quakes, 10)

###### Columns
e_quakes[, c(1, 2)]
df <- e_quakes[, -6]   # NOTE(review): quakes has only 5 columns, so -6 drops nothing -- confirm intent
summary(e_quakes[, 1])
summary(e_quakes)
e_quakes$lat

########### Summary of the data #########
summary(e_quakes$lat)
summary(e_quakes)
summary(e_quakes$long)

#####################
plot(e_quakes$lat)
plot(e_quakes$lat, e_quakes$long, type = "p")
plot(e_quakes)

# points and lines
plot(e_quakes$lat, type = "b") # p: points, l: lines, b: both
plot(e_quakes$lat, ylab = 'lat',
     xlab = 'No of Instances', main = 'lat of earthquakes',
     col = 'blue')

# Horizontal bar plot
barplot(e_quakes$lat, main = 'lat of earthquakes',
        ylab = 'lat', col = 'blue', horiz = TRUE, axes = TRUE)

# Histogram
hist(e_quakes$lat)
hist(e_quakes$long,
     main = 'longitude of earthquake',
     xlab = 'long', col = 'blue')

# Single box plot
boxplot(e_quakes$lat, main = "Temp_Boxplot")
# Multiple box plots
boxplot(e_quakes, main = 'Multiple')

# margin of the grid (mar),
# no of rows and columns (mfrow),
# whether a border is to be included (bty)
# and position of the labels (las: 1 for horizontal, las: 0 for vertical)
# bty - box around the plot
par(mfrow = c(3, 3), mar = c(2, 5, 2, 1), las = 0, bty = "o")
plot(e_quakes$lat)
plot(e_quakes$lat, e_quakes$long)
plot(e_quakes$lat, type = "l")
plot(e_quakes$lat, type = "l")
plot(e_quakes$lat, type = "l")
barplot(e_quakes$lat, main = 'lat of earthquakes',
        xlab = 'lat', col = 'green', horiz = TRUE)
hist(e_quakes$lat)
boxplot(e_quakes$lat)
# was e_quakes[, 0:4]: index 0 is silently dropped in R, so 1:4 states the intent
boxplot(e_quakes[, 1:4], main = 'Multiple Box plots')

# skewness (column-wise via moments)
skewness(e_quakes)
# kurtosis (column-wise via moments)
kurtosis(e_quakes)
# variance of magnitude; the original called variance(mag, stations, v=1),
# which does not exist -- var(x, y) would give the mag/stations covariance
var(e_quakes$mag)
# standard deviation
sd(e_quakes$lat, na.rm = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_firebrowse.R
\name{loadFirebrowseFolders}
\alias{loadFirebrowseFolders}
\alias{loadFirehoseFolders}
\title{Load Firebrowse folders}
\usage{
loadFirebrowseFolders(folder, exclude = "", progress = echoProgress)
loadFirehoseFolders(folder, exclude = "", progress = echoProgress)
}
\arguments{
\item{folder}{Character: folder(s) in which to look for Firebrowse files}
\item{exclude}{Character: files to exclude from the loading}
\item{progress}{Function to show the progress (default is to print progress
to console)}
}
\value{
List with loaded data.frames
}
\description{
Loads the files present in each folder as a data.frame.
}
\note{
For faster execution, this function uses the \code{readr} library. This
function ignores subfolders of the given folder (which means that files
inside subfolders are NOT loaded).
}
|
/man/loadFirebrowseFolders.Rd
|
no_license
|
mgandal/psichomics
|
R
| false
| true
| 902
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_firebrowse.R
\name{loadFirebrowseFolders}
\alias{loadFirebrowseFolders}
\alias{loadFirehoseFolders}
\title{Load Firebrowse folders}
\usage{
loadFirebrowseFolders(folder, exclude = "", progress = echoProgress)
loadFirehoseFolders(folder, exclude = "", progress = echoProgress)
}
\arguments{
\item{folder}{Character: folder(s) in which to look for Firebrowse files}
\item{exclude}{Character: files to exclude from the loading}
\item{progress}{Function to show the progress (default is to print progress
to console)}
}
\value{
List with loaded data.frames
}
\description{
Loads the files present in each folder as a data.frame.
}
\note{
For faster execution, this function uses the \code{readr} library. This
function ignores subfolders of the given folder (which means that files
inside subfolders are NOT loaded).
}
|
## Write a list of BPMAP sequence structures to a text TPMAP file.
##
## Arguments:
##   filename  - path of the TPMAP file to create (must not already exist)
##   bpmaplist - list of sequences; each needs a 'seqInfo' list (with
##               'groupname', 'version' and 'name' components) plus the
##               'pmx', 'pmy', 'probeseq', 'startpos' and 'strand' slots
##   verbose   - if non-zero, report progress per sequence
##
## Returns invisible(NULL); called for its side effect of writing the file.
writeTpmap <- function(filename, bpmaplist, verbose = 0) {
  ## Write one sequence (header lines + per-probe hit table) to the open
  ## connection `out` in the enclosing frame.
  writeSequence <- function(seq) {
    # Skip sequences lacking required slots rather than aborting the whole
    # file. (The original repeated the seqInfo check a second time with
    # stop(); that branch was unreachable and has been removed.)
    if (length(setdiff(c("seqInfo", "pmx", "pmy", "probeseq", "startpos", "strand"),
                       names(seq))) != 0 ||
        length(setdiff(c("groupname", "version", "name"),
                       names(seq$seqInfo))) != 0) {
      cat(" ... skipping a sequence due to missing slots\n")
      return(NULL)
    }
    seqInfo <- seq$seqInfo
    writeLines(paste("#seq_group_name", seqInfo$groupname), con = out, sep = "\n")
    writeLines(paste("#version", seqInfo$version), con = out, sep = "\n")
    # Optional free-form parameters become '#tag value' header lines
    if (!is.null(seqInfo$parameters)) {
      for (tag in names(seqInfo$parameters))
        writeLines(paste("#", tag, " ", seqInfo$parameters[tag], sep = ""),
                   con = out, sep = "\n")
    }
    # One output row per probe: sequence, strand, group name, positions;
    # mm/matchscore columns are included when present in `seq`.
    hits <- t(do.call(cbind, c(seq[c("probeseq", "strand")],
                               list(groupname = rep(seqInfo$name, length(seq$pmx))),
                               seq[c("startpos", "pmx", "pmy", "mmx", "mmy", "matchscore")])))
    write(hits, file = out, ncolumns = nrow(hits), append = TRUE)
    return(NULL)
  } # writeSequence()

  # Refuse to clobber an existing file
  if (file.exists(filename)) {
    stop("Could not write TPMAP file. File already exists: ", filename)
  }
  out <- file(filename, open = "w")
  on.exit(close(out))  # close the connection even if a write fails
  for (i in seq_along(bpmaplist)) {
    if (verbose)
      cat(paste("Writing sequence", names(bpmaplist)[i], "\n"))
    writeSequence(bpmaplist[[i]])
  }
  invisible(NULL)
}
|
/R/writeTpmap.R
|
no_license
|
HenrikBengtsson/affxparser
|
R
| false
| false
| 1,812
|
r
|
## Write a list of BPMAP sequence structures to a text TPMAP file.
##
## Arguments:
##   filename  - path of the TPMAP file to create (must not already exist)
##   bpmaplist - list of sequences; each needs a 'seqInfo' list (with
##               'groupname', 'version' and 'name' components) plus the
##               'pmx', 'pmy', 'probeseq', 'startpos' and 'strand' slots
##   verbose   - if non-zero, report progress per sequence
##
## Returns invisible(NULL); called for its side effect of writing the file.
writeTpmap <- function(filename, bpmaplist, verbose = 0) {
  ## Write one sequence (header lines + per-probe hit table) to the open
  ## connection `out` in the enclosing frame.
  writeSequence <- function(seq) {
    # Skip sequences lacking required slots rather than aborting the whole
    # file. (The original repeated the seqInfo check a second time with
    # stop(); that branch was unreachable and has been removed.)
    if (length(setdiff(c("seqInfo", "pmx", "pmy", "probeseq", "startpos", "strand"),
                       names(seq))) != 0 ||
        length(setdiff(c("groupname", "version", "name"),
                       names(seq$seqInfo))) != 0) {
      cat(" ... skipping a sequence due to missing slots\n")
      return(NULL)
    }
    seqInfo <- seq$seqInfo
    writeLines(paste("#seq_group_name", seqInfo$groupname), con = out, sep = "\n")
    writeLines(paste("#version", seqInfo$version), con = out, sep = "\n")
    # Optional free-form parameters become '#tag value' header lines
    if (!is.null(seqInfo$parameters)) {
      for (tag in names(seqInfo$parameters))
        writeLines(paste("#", tag, " ", seqInfo$parameters[tag], sep = ""),
                   con = out, sep = "\n")
    }
    # One output row per probe: sequence, strand, group name, positions;
    # mm/matchscore columns are included when present in `seq`.
    hits <- t(do.call(cbind, c(seq[c("probeseq", "strand")],
                               list(groupname = rep(seqInfo$name, length(seq$pmx))),
                               seq[c("startpos", "pmx", "pmy", "mmx", "mmy", "matchscore")])))
    write(hits, file = out, ncolumns = nrow(hits), append = TRUE)
    return(NULL)
  } # writeSequence()

  # Refuse to clobber an existing file
  if (file.exists(filename)) {
    stop("Could not write TPMAP file. File already exists: ", filename)
  }
  out <- file(filename, open = "w")
  on.exit(close(out))  # close the connection even if a write fails
  for (i in seq_along(bpmaplist)) {
    if (verbose)
      cat(paste("Writing sequence", names(bpmaplist)[i], "\n"))
    writeSequence(bpmaplist[[i]])
  }
  invisible(NULL)
}
|
library(ggplot2)
## Join the 2013 fertility-rate and life-expectancy tables on country code.
## NOTE(review): FR2013 and LE2013 are assumed to already exist in the
## workspace (loaded elsewhere) -- confirm before running this script.
MergedDataFor2013 <- merge.data.frame(FR2013,LE2013,by.x="Country.Code",by.y="Country")
head(MergedDataFor2013)
## Scatter plot: filled triangles (shape 17), slight transparency, coloured by region
qplot(data=MergedDataFor2013, x=Fertility.Rate,y=Life2013,color=Region, shape=I(17), alpha=I(0.8),main="Fertility Rate vs Life Expectancy 2013")
|
/Plot of Fertility Rate and Life Expectancy 2013.R
|
no_license
|
AramidHaiju/R
|
R
| false
| false
| 278
|
r
|
library(ggplot2)
## Join the 2013 fertility-rate and life-expectancy tables on country code.
## NOTE(review): FR2013 and LE2013 are assumed to already exist in the
## workspace (loaded elsewhere) -- confirm before running this script.
MergedDataFor2013 <- merge.data.frame(FR2013,LE2013,by.x="Country.Code",by.y="Country")
head(MergedDataFor2013)
## Scatter plot: filled triangles (shape 17), slight transparency, coloured by region
qplot(data=MergedDataFor2013, x=Fertility.Rate,y=Life2013,color=Region, shape=I(17), alpha=I(0.8),main="Fertility Rate vs Life Expectancy 2013")
|
## Assignment2 - Caching the inverse of a matrix
## [makeCacheMatrix] given a matrix, returns an "object" that keeps a cache of
## the inverse of that matrix.
## Avaliable methods:
## - get : return currently set matrix
## - set : sets a new matrix, deletes old cache
## - setinv : caches the given inverse
## - getinv : returns the currently cached inverse (or NULL)
makeCacheMatrix <- function(x = matrix()) {
  ## Cache container for a matrix and its inverse.
  ## Exposes set/get for the matrix and setinv/getinv for the inverse.
  inverse_cache <- NULL      # NULL until setinv() stores a value

  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse
    x <<- y
    inverse_cache <<- NULL
  }
  get <- function() x
  setinv <- function(inv) inverse_cache <<- inv
  getinv <- function() inverse_cache

  # Expose the four accessors as a named list
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## [cacheSolve] given a CacheMatrix, returns the inverse of that matrix without
## recalculating if the matrix inverse has been previosly calculated.
## Given a CacheMatrix `x` (from makeCacheMatrix), return the inverse of
## its matrix, computing with solve() only when no cached value exists.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Serve from cache when a previously computed inverse is available
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Nothing cached: compute the inverse, store it, and return it
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
mvilab/ProgrammingAssignment2
|
R
| false
| false
| 1,149
|
r
|
## Assignment2 - Caching the inverse of a matrix
## [makeCacheMatrix] given a matrix, returns an "object" that keeps a cache of
## the inverse of that matrix.
## Avaliable methods:
## - get : return currently set matrix
## - set : sets a new matrix, deletes old cache
## - setinv : caches the given inverse
## - getinv : returns the currently cached inverse (or NULL)
makeCacheMatrix <- function(x = matrix()) {
  ## Cache container for a matrix and its inverse.
  ## Exposes set/get for the matrix and setinv/getinv for the inverse.
  inverse_cache <- NULL      # NULL until setinv() stores a value

  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse
    x <<- y
    inverse_cache <<- NULL
  }
  get <- function() x
  setinv <- function(inv) inverse_cache <<- inv
  getinv <- function() inverse_cache

  # Expose the four accessors as a named list
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## [cacheSolve] given a CacheMatrix, returns the inverse of that matrix without
## recalculating if the matrix inverse has been previosly calculated.
## Given a CacheMatrix `x` (from makeCacheMatrix), return the inverse of
## its matrix, computing with solve() only when no cached value exists.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Serve from cache when a previously computed inverse is available
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Nothing cached: compute the inverse, store it, and return it
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
## Tests for read_export_options(): checks that export metadata (encoding,
## short/long table names, zip vs unzipped, rectangular layout, meta table
## names, project name/version, export time and export format) is parsed
## correctly from a range of example secuTrial exports.
context("load export options testing")

# shortened table names
export_options_regular_short <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_short_en_utf8.zip",
                         package = "secuTrialR"))
# long table names
export_options_regular_long <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_long_en_utf8.zip",
                         package = "secuTrialR"))
# rectangular export, shortened table names
export_options_rect_short <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_rt_short_en_utf8.zip",
                         package = "secuTrialR"))
# rectangular export, long table names
export_options_rect_long <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_rt_long_en_utf8.zip",
                         package = "secuTrialR"))
# unzipped export directory
bmd_unzipped <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_short_en_utf8",
                         package = "secuTrialR"))
# export with duplicated meta data
dup_meta <- read_export_options(
  system.file("extdata", "sT_exports", "longnames",
              "s_export_CSV-xls_CTU05_long_meta_ref_miss_en_utf8.zip",
              package = "secuTrialR"))
# ISO-8859-15 encoded export
exp_opt_tes05_iso <- read_export_options(
  system.file("extdata", "sT_exports", "encodings",
              "s_export_CSV-xls_TES05_short_ref_en_iso8859-15.zip",
              package = "secuTrialR"))

# test encoding
test_that("Encoding parsed as expected.", {
  expect_equal(export_options_regular_short$encoding, "UTF-8")
  expect_equal(bmd_unzipped$encoding, "UTF-8")
  expect_equal(exp_opt_tes05_iso$encoding, "ISO-8859-15")
})

# test shortened table names
test_that("Shorten names identified.", {
  expect_true(bmd_unzipped$short_names)
  expect_true(export_options_regular_short$short_names)
  expect_false(export_options_regular_long$short_names)
  expect_true(export_options_rect_short$short_names)
  expect_false(export_options_rect_long$short_names)
})

# test zip
test_that("zip archive ending identified.", {
  expect_false(bmd_unzipped$is_zip)
  expect_true(export_options_regular_short$is_zip)
  expect_true(export_options_regular_long$is_zip)
  expect_true(export_options_rect_short$is_zip)
  expect_true(export_options_rect_long$is_zip)
})

# test rectangular identification
test_that("Rectangular/regular export identified.", {
  expect_true(export_options_rect_short$is_rectangular)
  expect_true(export_options_rect_long$is_rectangular)
  expect_false(export_options_regular_short$is_rectangular)
  expect_false(bmd_unzipped$is_rectangular)
  expect_false(export_options_regular_long$is_rectangular)
})

# test meta names (abbreviated for short exports, spelled out for long ones)
test_that("Meta names available.", {
  expect_equal(as.vector(unlist(export_options_regular_short$meta_names)),
               c("fs", "cn", "ctr", "is",
                 "qs", "qac", "vp", "vpfs",
                 "atcn", "atcvp", "cts", "miv", "cl"))
  expect_equal(as.vector(unlist(export_options_regular_long$meta_names)),
               c("forms", "casenodes",
                 "centres", "items",
                 "questions", "queries",
                 "visitplan", "visitplanforms",
                 "atcasenodes", "atcasevisitplans",
                 "comments", "miv", "cl"))
})

# prepare path to example export
export_location <- system.file("extdata", "sT_exports", "BMD",
                               "s_export_CSV-xls_BMD_short_en_utf8.zip",
                               package = "secuTrialR")
# load all export data
sT_export <- read_secuTrial_raw(data_dir = export_location)
# capture the print
captured_print <- capture.output(print(sT_export$export_options))

# test print.secutrialoptions
# ("Seperator" is deliberately spelled as the package prints it)
test_that("Print export options working.", {
  expect_equal(captured_print[1], "secuTrial version: 5.3.4.6 ")
  expect_equal(captured_print[2], "Time of export on server: 25.02.2019 - 15:14:27 (CET) ")
  expect_equal(captured_print[6], "Seperator: '\t'")
  expect_equal(captured_print[7], "14 files exported")
  expect_equal(captured_print[9], "Reference values not exported - factorize not possible")
})

sT_export2 <- read_secuTrial_raw(data_dir = system.file("extdata", "sT_exports", "shortnames",
                                                        "s_export_CSV-xls_CTU05_short_miss_en_utf8.zip",
                                                        package = "secuTrialR"))

# project version
test_that("Project version parsing", {
  expect_equal(sT_export$export_options$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(export_options_regular_short$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(export_options_regular_long$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(sT_export2$export_options$project_version, "(30.04.2019 - 13:40:52 (CEST))")
  expect_equal(bmd_unzipped$project_version, "(25.02.2019 - 13:13:44 (CET))")
})

# project name
test_that("Project name parsing", {
  expect_equal(sT_export$export_options$project_name, "BONE MINERAL DENSITY")
  expect_equal(export_options_regular_short$project_name, "BONE MINERAL DENSITY")
  expect_equal(export_options_regular_long$project_name, "BONE MINERAL DENSITY")
  expect_equal(sT_export2$export_options$project_name, "secuTrialR example CDMA")
  expect_equal(bmd_unzipped$project_name, "BONE MINERAL DENSITY")
})

# duplicated meta data
# fix: this block previously reused the description "Project version
# parsing" (copy-paste error) although it tests the duplicate_meta flag
test_that("Duplicated meta data detection", {
  expect_false(sT_export$export_options$duplicate_meta)
  expect_false(export_options_regular_short$duplicate_meta)
  expect_false(export_options_regular_long$duplicate_meta)
  expect_false(sT_export2$export_options$duplicate_meta)
  expect_false(bmd_unzipped$duplicate_meta)
  expect_true(dup_meta$duplicate_meta)
})

# test time of export
# manually checked all of these in the respective ExportOptions.html files
test_that("Time of export", {
  expect_equal(sT_export$export_options$time_of_export, "25.02.2019 - 15:14:27 (CET)")
  expect_equal(export_options_regular_long$time_of_export, "18.03.2019 - 10:47:03 (CET)")
  expect_equal(sT_export2$export_options$time_of_export, "30.04.2019 - 15:29:45 (CEST)")
})

# errors for non CSV exports
test_that("Errored for non CSV format", {
  # SAS
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_SAS_CTU05_20191115-092453_SAS.zip",
                                                          package = "secuTrialR")))
  # SPSS
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_SPSS_CTU05_20191115-092020_SPSS.zip",
                                                          package = "secuTrialR")))
  # CDISC
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_XML_CTU05_20191115-092559_CDISC.zip",
                                                          package = "secuTrialR")))
})

# success for CSV exports
eo_csv <- read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                     "s_export_CSV_CTU05_20191115-091627_CSV.zip",
                                                     package = "secuTrialR"))
test_that("Success for CSV format", {
  expect_equal(eo_csv$format_info, "CSV format")
  expect_equal(exp_opt_tes05_iso$format_info, "CSV format for MS Excel")
})
|
/tests/testthat/test-read_export_options.R
|
permissive
|
gillesdutilh/secuTrialR
|
R
| false
| false
| 8,771
|
r
|
## Tests for read_export_options(): checks that export metadata (encoding,
## short/long table names, zip vs unzipped, rectangular layout, meta table
## names, project name/version, export time and export format) is parsed
## correctly from a range of example secuTrial exports.
context("load export options testing")

# shortened table names
export_options_regular_short <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_short_en_utf8.zip",
                         package = "secuTrialR"))
# long table names
export_options_regular_long <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_long_en_utf8.zip",
                         package = "secuTrialR"))
# rectangular export, shortened table names
export_options_rect_short <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_rt_short_en_utf8.zip",
                         package = "secuTrialR"))
# rectangular export, long table names
export_options_rect_long <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_rt_long_en_utf8.zip",
                         package = "secuTrialR"))
# unzipped export directory
bmd_unzipped <- read_export_options(
  data_dir = system.file("extdata", "sT_exports", "BMD",
                         "s_export_CSV-xls_BMD_short_en_utf8",
                         package = "secuTrialR"))
# export with duplicated meta data
dup_meta <- read_export_options(
  system.file("extdata", "sT_exports", "longnames",
              "s_export_CSV-xls_CTU05_long_meta_ref_miss_en_utf8.zip",
              package = "secuTrialR"))
# ISO-8859-15 encoded export
exp_opt_tes05_iso <- read_export_options(
  system.file("extdata", "sT_exports", "encodings",
              "s_export_CSV-xls_TES05_short_ref_en_iso8859-15.zip",
              package = "secuTrialR"))

# test encoding
test_that("Encoding parsed as expected.", {
  expect_equal(export_options_regular_short$encoding, "UTF-8")
  expect_equal(bmd_unzipped$encoding, "UTF-8")
  expect_equal(exp_opt_tes05_iso$encoding, "ISO-8859-15")
})

# test shortened table names
test_that("Shorten names identified.", {
  expect_true(bmd_unzipped$short_names)
  expect_true(export_options_regular_short$short_names)
  expect_false(export_options_regular_long$short_names)
  expect_true(export_options_rect_short$short_names)
  expect_false(export_options_rect_long$short_names)
})

# test zip
test_that("zip archive ending identified.", {
  expect_false(bmd_unzipped$is_zip)
  expect_true(export_options_regular_short$is_zip)
  expect_true(export_options_regular_long$is_zip)
  expect_true(export_options_rect_short$is_zip)
  expect_true(export_options_rect_long$is_zip)
})

# test rectangular identification
test_that("Rectangular/regular export identified.", {
  expect_true(export_options_rect_short$is_rectangular)
  expect_true(export_options_rect_long$is_rectangular)
  expect_false(export_options_regular_short$is_rectangular)
  expect_false(bmd_unzipped$is_rectangular)
  expect_false(export_options_regular_long$is_rectangular)
})

# test meta names (abbreviated for short exports, spelled out for long ones)
test_that("Meta names available.", {
  expect_equal(as.vector(unlist(export_options_regular_short$meta_names)),
               c("fs", "cn", "ctr", "is",
                 "qs", "qac", "vp", "vpfs",
                 "atcn", "atcvp", "cts", "miv", "cl"))
  expect_equal(as.vector(unlist(export_options_regular_long$meta_names)),
               c("forms", "casenodes",
                 "centres", "items",
                 "questions", "queries",
                 "visitplan", "visitplanforms",
                 "atcasenodes", "atcasevisitplans",
                 "comments", "miv", "cl"))
})

# prepare path to example export
export_location <- system.file("extdata", "sT_exports", "BMD",
                               "s_export_CSV-xls_BMD_short_en_utf8.zip",
                               package = "secuTrialR")
# load all export data
sT_export <- read_secuTrial_raw(data_dir = export_location)
# capture the print
captured_print <- capture.output(print(sT_export$export_options))

# test print.secutrialoptions
# ("Seperator" is deliberately spelled as the package prints it)
test_that("Print export options working.", {
  expect_equal(captured_print[1], "secuTrial version: 5.3.4.6 ")
  expect_equal(captured_print[2], "Time of export on server: 25.02.2019 - 15:14:27 (CET) ")
  expect_equal(captured_print[6], "Seperator: '\t'")
  expect_equal(captured_print[7], "14 files exported")
  expect_equal(captured_print[9], "Reference values not exported - factorize not possible")
})

sT_export2 <- read_secuTrial_raw(data_dir = system.file("extdata", "sT_exports", "shortnames",
                                                        "s_export_CSV-xls_CTU05_short_miss_en_utf8.zip",
                                                        package = "secuTrialR"))

# project version
test_that("Project version parsing", {
  expect_equal(sT_export$export_options$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(export_options_regular_short$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(export_options_regular_long$project_version, "(25.02.2019 - 13:13:44 (CET))")
  expect_equal(sT_export2$export_options$project_version, "(30.04.2019 - 13:40:52 (CEST))")
  expect_equal(bmd_unzipped$project_version, "(25.02.2019 - 13:13:44 (CET))")
})

# project name
test_that("Project name parsing", {
  expect_equal(sT_export$export_options$project_name, "BONE MINERAL DENSITY")
  expect_equal(export_options_regular_short$project_name, "BONE MINERAL DENSITY")
  expect_equal(export_options_regular_long$project_name, "BONE MINERAL DENSITY")
  expect_equal(sT_export2$export_options$project_name, "secuTrialR example CDMA")
  expect_equal(bmd_unzipped$project_name, "BONE MINERAL DENSITY")
})

# duplicated meta data
# fix: this block previously reused the description "Project version
# parsing" (copy-paste error) although it tests the duplicate_meta flag
test_that("Duplicated meta data detection", {
  expect_false(sT_export$export_options$duplicate_meta)
  expect_false(export_options_regular_short$duplicate_meta)
  expect_false(export_options_regular_long$duplicate_meta)
  expect_false(sT_export2$export_options$duplicate_meta)
  expect_false(bmd_unzipped$duplicate_meta)
  expect_true(dup_meta$duplicate_meta)
})

# test time of export
# manually checked all of these in the respective ExportOptions.html files
test_that("Time of export", {
  expect_equal(sT_export$export_options$time_of_export, "25.02.2019 - 15:14:27 (CET)")
  expect_equal(export_options_regular_long$time_of_export, "18.03.2019 - 10:47:03 (CET)")
  expect_equal(sT_export2$export_options$time_of_export, "30.04.2019 - 15:29:45 (CEST)")
})

# errors for non CSV exports
test_that("Errored for non CSV format", {
  # SAS
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_SAS_CTU05_20191115-092453_SAS.zip",
                                                          package = "secuTrialR")))
  # SPSS
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_SPSS_CTU05_20191115-092020_SPSS.zip",
                                                          package = "secuTrialR")))
  # CDISC
  expect_error(read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                          "s_export_XML_CTU05_20191115-092559_CDISC.zip",
                                                          package = "secuTrialR")))
})

# success for CSV exports
eo_csv <- read_export_options(data_dir = system.file("extdata", "sT_exports", "export_options",
                                                     "s_export_CSV_CTU05_20191115-091627_CSV.zip",
                                                     package = "secuTrialR"))
test_that("Success for CSV format", {
  expect_equal(eo_csv$format_info, "CSV format")
  expect_equal(exp_opt_tes05_iso$format_info, "CSV format for MS Excel")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_swne.R
\name{RunSWNE}
\alias{RunSWNE}
\alias{RunSWNE.seurat}
\alias{RunSWNE.matrix}
\title{Wrapper for the running SWNE analysis functions}
\usage{
\method{RunSWNE}{seurat}(object, dist.metric = "euclidean",
n.cores = 3, k, var.genes, loss = "mse", alpha.exp = 1.25,
snn.exp = 1)
\method{RunSWNE}{matrix}(data.matrix, dist.metric = "euclidean",
n.cores = 3, k, var.genes = rownames(data.matrix), loss = "mse",
alpha.exp = 1.25, snn.exp = 1, var.exp = 0.05)
}
\arguments{
\item{object}{A seurat-class object (normalised)}
\item{n.cores}{Number of cores to use (passed to FindNumFactors)}
\item{k}{Number of NMF factors (passed to RunNMF). If none given, will be derived from FindNumFactors.}
\item{var.genes}{vector to specify variable genes. Will infer from Seurat or use full dataset if not given.}
\item{loss}{loss function to use (passed to RunNMF)}
\item{alpha.exp}{Increasing alpha.exp increases how much the NMF factors "pull" the samples (passed to EmbedSWNE)}
\item{snn.exp}{Decreasing snn.exp increases the effect of the similarity matrix on the embedding (passed to EmbedSWNE)}
\item{data.matrix}{a data matrix (genes x cells) which has been pre-normalised}
\item{var.exp}{Proportion of genes selected from most variable}
\item{dist.metric}{Similarity function to use for calculating factor positions (passed to EmbedSWNE). Options include pearson (correlation), IC (mutual information), cosine, euclidean.}
}
\value{
A list of factor (H.coords) and sample coordinates (sample.coords) in 2D
}
\description{
Wrapper for the running SWNE analysis functions
}
|
/man/RunSWNE.Rd
|
no_license
|
hlxie/swne
|
R
| false
| true
| 1,664
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_swne.R
\name{RunSWNE}
\alias{RunSWNE}
\alias{RunSWNE.seurat}
\alias{RunSWNE.matrix}
\title{Wrapper for the running SWNE analysis functions}
\usage{
\method{RunSWNE}{seurat}(object, dist.metric = "euclidean",
n.cores = 3, k, var.genes, loss = "mse", alpha.exp = 1.25,
snn.exp = 1)
\method{RunSWNE}{matrix}(data.matrix, dist.metric = "euclidean",
n.cores = 3, k, var.genes = rownames(data.matrix), loss = "mse",
alpha.exp = 1.25, snn.exp = 1, var.exp = 0.05)
}
\arguments{
\item{object}{A seurat-class object (normalised)}
\item{n.cores}{Number of cores to use (passed to FindNumFactors)}
\item{k}{Number of NMF factors (passed to RunNMF). If none given, will be derived from FindNumFactors.}
\item{var.genes}{vector to specify variable genes. Will infer from Seurat or use full dataset if not given.}
\item{loss}{loss function to use (passed to RunNMF)}
\item{alpha.exp}{Increasing alpha.exp increases how much the NMF factors "pull" the samples (passed to EmbedSWNE)}
\item{snn.exp}{Decreasing snn.exp increases the effect of the similarity matrix on the embedding (passed to EmbedSWNE)}
\item{data.matrix}{a data matrix (genes x cells) which has been pre-normalised}
\item{var.exp}{Proportion of genes selected from most variable}
\item{dist.metric}{Similarity function to use for calculating factor positions (passed to EmbedSWNE). Options include pearson (correlation), IC (mutual information), cosine, euclidean.}
}
\value{
A list of factor (H.coords) and sample coordinates (sample.coords) in 2D
}
\description{
Wrapper for the running SWNE analysis functions
}
|
# Note: crude selection of first 50 etas used to test the extensibility of pretrained models on arbitrary nsam
# ideally, new data with nsam=n_genome should be generated and new models should be trained
## Estimate theta per base pair along a genomic window using a pretrained
## boosting (GAM) model and compare it with analytic estimators
## (Watterson, Tajima, Fu), then plot all estimates along the chromosome.
## NOTE(review): relies on sourced helpers and on objects loaded from the
## .rda files below (columns.X, mus_train, sigmas_train, columns.model,
## m_gam) -- confirm those files define them before running.
library(mboost)
library(reshape2)
library(ggplot2)
source('../functions_boosting.R')
source('../functions_plot.R')
# === Initialise ====
# Population / chromosome / window coordinates used to build the CSV path
pop<-'ZI'
tid<-'2L'
frompos<-'7400000'
topos<-'7600000'
width<-'5000'
filename<-paste('../../data/genomes/',pop,'_Chr',tid,'_',frompos,'_',topos,'_window',width,'.csv',sep='')
nuisance<-'pure'
# === New Data ====
new_data<-read.csv(filename)
# normalise by the training mean and sd
load(paste('../../data/rdas/traindata_197_100_',nuisance,'.rda',sep=''))
# subset the covariates
X<-new_data[,columns.X]
# standardise each row's covariates with the training-set mean/sd
X<-t(apply(X,FUN=function(x){(x-mus_train)/sigmas_train},MARGIN=1))
X<-as.data.frame(X)
# === estimation per bp ====
load(paste('../../data/rdas/model_boosting_nsam197_',nuisance,'_nat0.rda',sep=''))
# includes model specific subset of columns (generalised from testing for robustness)
# drop the first entry of columns.model (presumably the response column -- verify)
X<-X[,columns.model[2:length(columns.model)]]
# with analytic
# analytic estimators, scaled to per-base-pair values
pred_w<-new_data$theta_watterson/new_data$bp
pred_d<-new_data$theta_tajima/new_data$bp
pred_fu<-new_data$theta_fu/new_data$bp # we can choose freely either fu or eta1 here
# prediction with glm
#pred_glm<-predict(m_glm,newdata=X)/new_data$bp
# prediction with gam
pred_gam<-predict(m_gam,newdata=X)/new_data$bp
# === visualisation 1: boosting prediction against analytic ====
# just a quick one
#plot(pred_fu,pred_gam,xlim=c(0,0.05),ylim=c(0,0.05))
#abline(a=0,b=1)
# === visualisation 2: plot of theta per bp along chromosome ====
# source('functions_plot.R')
# Assemble one long data frame: window start position plus each method's
# estimate (columns named T and F are the Tajima and Fu estimators)
pred <- data.frame(list(region=as.numeric(row.names(new_data))*as.numeric(width)+as.numeric(frompos),
#glm=pred_glm,
GAM=pred_gam,
W=pred_w,
T=pred_d,
F=pred_fu
)
)
# Wide -> long for ggplot; second column holds the method label
pred<-melt(pred,id='region')
colnames(pred)[2]<-"Methods"
locustheta<-ggplot(pred)+
geom_line(aes(x=region,y=value,colour=Methods))+
scale_colour_manual(values=c("red","blue","purple","black"))+
theme(plot.title=element_text(size=20,hjust = 0.5),
text=element_text(size=15)) +
ggtitle(expression(paste('Estimates of ',theta,' per base pair along 7.4-7.6 Mb at 5 kb intervals',sep=''))
) +
labs(
x="bp",
y=expression(paste("Estimated ",theta,sep=''))
)+
geom_hline(yintercept=0.016, linetype="dashed")
locustheta
#plot(new_data$test_tajima_d, type='l')
#plot(new_data$test_fu_li_d, type='l')
|
/src/two_genome_standardise_estimate.R
|
no_license
|
peerapongch/rhotheta
|
R
| false
| false
| 2,621
|
r
|
# Note: crude selection of first 50 etas used to test the extensibility of pretrained models on arbitrary nsam
# ideally, new data with nsam=n_genome should be generated and new models should be trained
#
# Script: estimate theta per base pair in 5 kb windows along a 200 kb region
# of chromosome arm 2L (population ZI) using analytic estimators (Watterson,
# Tajima, Fu) and a pretrained boosted GAM, then plot all estimates together.
library(mboost)
library(reshape2)
library(ggplot2)
source('../functions_boosting.R')
source('../functions_plot.R')
# === Initialise ====
# Population / chromosome / coordinates used to locate the input window csv.
pop<-'ZI'
tid<-'2L'
frompos<-'7400000'
topos<-'7600000'
width<-'5000'
filename<-paste('../../data/genomes/',pop,'_Chr',tid,'_',frompos,'_',topos,'_window',width,'.csv',sep='')
nuisance<-'pure'
# === New Data ====
new_data<-read.csv(filename)
# normalise by the training mean and sd
# NOTE(review): this rda presumably provides columns.X, mus_train and
# sigmas_train used below - confirm against the training pipeline.
load(paste('../../data/rdas/traindata_197_100_',nuisance,'.rda',sep=''))
# subset the covariates
X<-new_data[,columns.X]
# Standardise each window's covariates with the training means/sds (row-wise).
X<-t(apply(X,FUN=function(x){(x-mus_train)/sigmas_train},MARGIN=1))
X<-as.data.frame(X)
# === estimation per bp ====
# NOTE(review): presumably provides m_gam (and m_glm) plus columns.model.
load(paste('../../data/rdas/model_boosting_nsam197_',nuisance,'_nat0.rda',sep=''))
# includes model specific subset of columns (generalised from testing for robustness)
X<-X[,columns.model[2:length(columns.model)]]
# with analytic
# Analytic estimators divided by window length to get theta per bp.
pred_w<-new_data$theta_watterson/new_data$bp
pred_d<-new_data$theta_tajima/new_data$bp
pred_fu<-new_data$theta_fu/new_data$bp # we can choose freely either fu or eta1 here
# prediction with glm
#pred_glm<-predict(m_glm,newdata=X)/new_data$bp
# prediction with gam
pred_gam<-predict(m_gam,newdata=X)/new_data$bp
# === visualisation 1: boosting prediction against analytic ====
# just a quick one
#plot(pred_fu,pred_gam,xlim=c(0,0.05),ylim=c(0,0.05))
#abline(a=0,b=1)
# === visualisation 2: plot of theta per bp along chromosome ====
# source('functions_plot.R')
# Window start coordinate (bp) plus one column per estimator:
# GAM = boosted model, W = Watterson, T = Tajima, F = Fu.
pred <- data.frame(list(region=as.numeric(row.names(new_data))*as.numeric(width)+as.numeric(frompos),
#glm=pred_glm,
GAM=pred_gam,
W=pred_w,
T=pred_d,
F=pred_fu
)
)
# Long format for ggplot: one row per (window, method).
pred<-melt(pred,id='region')
colnames(pred)[2]<-"Methods"
locustheta<-ggplot(pred)+
geom_line(aes(x=region,y=value,colour=Methods))+
scale_colour_manual(values=c("red","blue","purple","black"))+
theme(plot.title=element_text(size=20,hjust = 0.5),
text=element_text(size=15)) +
ggtitle(expression(paste('Estimates of ',theta,' per base pair along 7.4-7.6 Mb at 5 kb intervals',sep=''))
) +
labs(
x="bp",
y=expression(paste("Estimated ",theta,sep=''))
)+
geom_hline(yintercept=0.016, linetype="dashed")
# NOTE(review): 0.016 appears to be a reference theta value - confirm source.
locustheta
#plot(new_data$test_tajima_d, type='l')
#plot(new_data$test_fu_li_d, type='l')
|
# Run one complete EM random-forest fit for a single experiment row.
#
# x              one-row data frame / list with exp_id, var (name of the
#                response variable) and no_pred (number of predictors to use).
# parms          list of global settings; this function reads grp_flds,
#                base_info and foi_offset and writes no_predictors and
#                dependent_variable into its local copy.
# foi_data       admin-level force-of-infection dataset.
# adm_covariates admin-level covariates.
# all_squares    20 km square-level covariates used for global prediction.
# all_predictors character vector of candidate predictor names (ordered).
#
# Side effects: writes intermediate datasets, fitted model objects,
# diagnostics (rds + png) and the final global prediction surface under
# output/ and figures/.
full_routine <- function(x,
                         parms,
                         foi_data,
                         adm_covariates,
                         all_squares,
                         all_predictors){
  j <- x$exp_id
  var_to_fit <- x$var
  number_of_predictors <- x$no_pred
  parms$no_predictors <- number_of_predictors
  parms$dependent_variable <- var_to_fit
  cat("exp id =", j, "\n")
  cat("response variable =", var_to_fit, "\n")
  cat("number of predictors =", number_of_predictors, "\n")
  model_type <- paste0("model_", j)
  grp_flds <- parms$grp_flds
  base_info <- parms$base_info
  foi_offset <- parms$foi_offset
  # output dir -----------------------------------------------------------------
  # All per-model outputs live under <root>/EM_algorithm/best_fit_models/<model>;
  # factor the repeated path construction into one helper.
  best_fit_path <- function(root, leaf) {
    file.path(root, "EM_algorithm", "best_fit_models", model_type, leaf)
  }
  foi_dts_out_path <- best_fit_path("output", "adm_foi_data")
  pxl_dts_out_path <- best_fit_path("output", "env_variables")
  RF_out_path <- best_fit_path("output", "optimized_model_objects")
  diagno_out_path <- best_fit_path("output", "diagnostics")
  diagno_fig_path <- best_fit_path("figures", "diagnostics")
  train_dts_path <- best_fit_path("output", "training_datasets")
  all_pred_out_path <- best_fit_path("output", "adm_foi_predictions")
  global_predictions_out_path <- file.path("output",
                                           "predictions_world",
                                           "best_fit_models",
                                           model_type)
  # ---------------------------------------------------------------------------
  # Predictors are taken in order from the (pre-ranked) candidate list.
  my_predictors <- all_predictors[1:number_of_predictors]
  foi_data_2 <- preprocess_adm_data(parms, foi_data)
  pxl_data_2 <- preprocess_pxl_data(parms, foi_data_2, all_squares)
  write_out_rds(foi_data_2, foi_dts_out_path, "adm_foi_data.rds")
  write_out_rds(pxl_data_2, pxl_dts_out_path, "env_vars_20km.rds")
  training_dataset <- foi_data_2[, c(var_to_fit, my_predictors, "new_weight")]
  # Initial (admin-level) random forest fit.
  RF_obj <- fit_ranger_RF(parms = parms,
                          dependent_variable = var_to_fit,
                          predictors = my_predictors,
                          training_dataset = training_dataset,
                          my_weights = "new_weight")
  p_i <- make_ranger_predictions(mod_obj = RF_obj,
                                 dataset = pxl_data_2,
                                 sel_preds = my_predictors)
  pxl_data_2$p_i <- p_i
  # Rename the observed response to the generic name used by the EM step.
  names(foi_data_2)[names(foi_data_2) == var_to_fit] <- "o_j"
  # Attach the admin-level observation to each pixel of that admin unit.
  pxl_data_3 <- inner_join(pxl_data_2, foi_data_2[, c(grp_flds, "o_j")])
  # NOTE(review): group_by(.dots = ) is deprecated in recent dplyr; kept
  # as-is to avoid changing behaviour under the pinned package version.
  pxl_dts_grp <- pxl_data_3 %>%
    group_by(.dots = grp_flds) %>%
    summarise(pop_sqr_sum = sum(population))
  pxl_data_4 <- left_join(pxl_data_3, pxl_dts_grp)
  # Population share of each pixel within its admin unit.
  pxl_data_4$pop_weight <- pxl_data_4$population / pxl_data_4$pop_sqr_sum
  # Expectation-maximisation loop refining the pixel-level fit.
  RF_obj_optim <- exp_max_algorithm(parms = parms,
                                    orig_dataset = foi_data_2,
                                    pxl_dataset = pxl_data_4,
                                    my_predictors = my_predictors,
                                    RF_obj_path = RF_out_path,
                                    RF_obj_name = "RF_obj.rds",
                                    diagn_tab_path = diagno_out_path,
                                    diagn_tab_name = "diagno_table.rds",
                                    train_dts_path = train_dts_path,
                                    train_dts_name = "train_dts.rds",
                                    adm_dataset = adm_covariates)
  data_to_plot <- readRDS(file.path(diagno_out_path, "diagno_table.rds"))
  plot_EM_diagnostics(data_to_plot, diagno_fig_path, "diagnostics.png")
  prediction_set <- make_ranger_predictions(RF_obj_optim,
                                            pxl_data_2,
                                            my_predictors)
  join_all <- join_predictions(parms = parms,
                               foi_dataset = foi_data_2,
                               RF_obj = RF_obj_optim,
                               adm_dataset = adm_covariates,
                               my_predictors = my_predictors,
                               all_sqr_predictions = prediction_set,
                               sqr_dataset = pxl_data_2)
  write_out_rds(join_all, all_pred_out_path, "all_scale_predictions.rds")
  global_predictions <- make_ranger_predictions(RF_obj_optim,
                                                all_squares,
                                                my_predictors)
  # Back-transform from the fitted scale to the reporting scale.
  if(var_to_fit == "FOI"){
    global_predictions <- global_predictions - foi_offset
  }
  if(var_to_fit == "Z"){
    # NOTE(review): 35 is presumably a fixed time horizon in years used
    # elsewhere in the pipeline - confirm.
    global_predictions <- (global_predictions - foi_offset) * all_squares$birth_rate * 35
  }
  # Negative predictions are not meaningful; clamp at zero.
  global_predictions[global_predictions < 0] <- 0
  ret <- cbind(all_squares[, base_info], best = global_predictions)
  write_out_rds(ret, global_predictions_out_path, "response.rds")
}
|
/R/random_forest/full_routine.R
|
no_license
|
mrc-ide/DENV_risk_maps
|
R
| false
| false
| 6,164
|
r
|
# Run one complete EM random-forest fit for a single experiment row.
#
# x              one-row data frame / list with exp_id, var (name of the
#                response variable) and no_pred (number of predictors to use).
# parms          list of global settings; this function reads grp_flds,
#                base_info and foi_offset and writes no_predictors and
#                dependent_variable into its local copy.
# foi_data       admin-level force-of-infection dataset.
# adm_covariates admin-level covariates.
# all_squares    20 km square-level covariates used for global prediction.
# all_predictors character vector of candidate predictor names (ordered).
#
# Side effects: writes intermediate datasets, fitted model objects,
# diagnostics (rds + png) and the final global prediction surface under
# output/ and figures/.
full_routine <- function(x,
                         parms,
                         foi_data,
                         adm_covariates,
                         all_squares,
                         all_predictors){
  j <- x$exp_id
  var_to_fit <- x$var
  number_of_predictors <- x$no_pred
  parms$no_predictors <- number_of_predictors
  parms$dependent_variable <- var_to_fit
  cat("exp id =", j, "\n")
  cat("response variable =", var_to_fit, "\n")
  cat("number of predictors =", number_of_predictors, "\n")
  model_type <- paste0("model_", j)
  grp_flds <- parms$grp_flds
  base_info <- parms$base_info
  foi_offset <- parms$foi_offset
  # output dir -----------------------------------------------------------------
  # All per-model outputs live under <root>/EM_algorithm/best_fit_models/<model>;
  # factor the repeated path construction into one helper.
  best_fit_path <- function(root, leaf) {
    file.path(root, "EM_algorithm", "best_fit_models", model_type, leaf)
  }
  foi_dts_out_path <- best_fit_path("output", "adm_foi_data")
  pxl_dts_out_path <- best_fit_path("output", "env_variables")
  RF_out_path <- best_fit_path("output", "optimized_model_objects")
  diagno_out_path <- best_fit_path("output", "diagnostics")
  diagno_fig_path <- best_fit_path("figures", "diagnostics")
  train_dts_path <- best_fit_path("output", "training_datasets")
  all_pred_out_path <- best_fit_path("output", "adm_foi_predictions")
  global_predictions_out_path <- file.path("output",
                                           "predictions_world",
                                           "best_fit_models",
                                           model_type)
  # ---------------------------------------------------------------------------
  # Predictors are taken in order from the (pre-ranked) candidate list.
  my_predictors <- all_predictors[1:number_of_predictors]
  foi_data_2 <- preprocess_adm_data(parms, foi_data)
  pxl_data_2 <- preprocess_pxl_data(parms, foi_data_2, all_squares)
  write_out_rds(foi_data_2, foi_dts_out_path, "adm_foi_data.rds")
  write_out_rds(pxl_data_2, pxl_dts_out_path, "env_vars_20km.rds")
  training_dataset <- foi_data_2[, c(var_to_fit, my_predictors, "new_weight")]
  # Initial (admin-level) random forest fit.
  RF_obj <- fit_ranger_RF(parms = parms,
                          dependent_variable = var_to_fit,
                          predictors = my_predictors,
                          training_dataset = training_dataset,
                          my_weights = "new_weight")
  p_i <- make_ranger_predictions(mod_obj = RF_obj,
                                 dataset = pxl_data_2,
                                 sel_preds = my_predictors)
  pxl_data_2$p_i <- p_i
  # Rename the observed response to the generic name used by the EM step.
  names(foi_data_2)[names(foi_data_2) == var_to_fit] <- "o_j"
  # Attach the admin-level observation to each pixel of that admin unit.
  pxl_data_3 <- inner_join(pxl_data_2, foi_data_2[, c(grp_flds, "o_j")])
  # NOTE(review): group_by(.dots = ) is deprecated in recent dplyr; kept
  # as-is to avoid changing behaviour under the pinned package version.
  pxl_dts_grp <- pxl_data_3 %>%
    group_by(.dots = grp_flds) %>%
    summarise(pop_sqr_sum = sum(population))
  pxl_data_4 <- left_join(pxl_data_3, pxl_dts_grp)
  # Population share of each pixel within its admin unit.
  pxl_data_4$pop_weight <- pxl_data_4$population / pxl_data_4$pop_sqr_sum
  # Expectation-maximisation loop refining the pixel-level fit.
  RF_obj_optim <- exp_max_algorithm(parms = parms,
                                    orig_dataset = foi_data_2,
                                    pxl_dataset = pxl_data_4,
                                    my_predictors = my_predictors,
                                    RF_obj_path = RF_out_path,
                                    RF_obj_name = "RF_obj.rds",
                                    diagn_tab_path = diagno_out_path,
                                    diagn_tab_name = "diagno_table.rds",
                                    train_dts_path = train_dts_path,
                                    train_dts_name = "train_dts.rds",
                                    adm_dataset = adm_covariates)
  data_to_plot <- readRDS(file.path(diagno_out_path, "diagno_table.rds"))
  plot_EM_diagnostics(data_to_plot, diagno_fig_path, "diagnostics.png")
  prediction_set <- make_ranger_predictions(RF_obj_optim,
                                            pxl_data_2,
                                            my_predictors)
  join_all <- join_predictions(parms = parms,
                               foi_dataset = foi_data_2,
                               RF_obj = RF_obj_optim,
                               adm_dataset = adm_covariates,
                               my_predictors = my_predictors,
                               all_sqr_predictions = prediction_set,
                               sqr_dataset = pxl_data_2)
  write_out_rds(join_all, all_pred_out_path, "all_scale_predictions.rds")
  global_predictions <- make_ranger_predictions(RF_obj_optim,
                                                all_squares,
                                                my_predictors)
  # Back-transform from the fitted scale to the reporting scale.
  if(var_to_fit == "FOI"){
    global_predictions <- global_predictions - foi_offset
  }
  if(var_to_fit == "Z"){
    # NOTE(review): 35 is presumably a fixed time horizon in years used
    # elsewhere in the pipeline - confirm.
    global_predictions <- (global_predictions - foi_offset) * all_squares$birth_rate * 35
  }
  # Negative predictions are not meaningful; clamp at zero.
  global_predictions[global_predictions < 0] <- 0
  ret <- cbind(all_squares[, base_info], best = global_predictions)
  write_out_rds(ret, global_predictions_out_path, "response.rds")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DoOR.data-package.R
\docType{data}
\name{Or85c}
\alias{Or85c}
\title{Or85c}
\format{
'data.frame': 693 obs. of 8 variables:
$ Class : Factor w/ 17 levels "acid","acids",..: NA 13 5 5 5 5 5 5 5 5 ...
$ Name : Factor w/ 690 levels "11-cis vaccenyl acetate",..: 634 675 240 613 283 239 363 436 458 341 ...
$ InChIKey : Factor w/ 693 levels "ACCRBMDJCPPJDX-UHFFFAOYSA-N",..: 482 612 549 252 548 418 196 577 41 462 ...
$ CID : Factor w/ 687 levels "1001","10050",..: 686 680 139 15 220 189 483 610 564 468 ...
$ CAS : Factor w/ 677 levels "1001-45-2","10032-13-0",..: 676 591 212 100 379 586 231 114 62 200 ...
$ Kreher.2005.EN : int 16 51 NA NA NA NA NA NA NA NA ...
$ Kreher.2008.EN : int 9 NA NA NA NA NA NA NA NA NA ...
$ Montague.2011.EN: int 5 47 NA NA NA NA NA NA NA NA ...
}
\description{
DoOR response data for responding unit Or85c. Please find detailed
information on the respective sources of the data in door_dataset_info.
}
\keyword{DoOR}
\keyword{Or85c}
\keyword{dataset}
\keyword{responding_unit}
|
/man/Or85c.Rd
|
no_license
|
ropensci/DoOR.data
|
R
| false
| true
| 1,144
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DoOR.data-package.R
\docType{data}
\name{Or85c}
\alias{Or85c}
\title{Or85c}
\format{
'data.frame': 693 obs. of 8 variables:
$ Class : Factor w/ 17 levels "acid","acids",..: NA 13 5 5 5 5 5 5 5 5 ...
$ Name : Factor w/ 690 levels "11-cis vaccenyl acetate",..: 634 675 240 613 283 239 363 436 458 341 ...
$ InChIKey : Factor w/ 693 levels "ACCRBMDJCPPJDX-UHFFFAOYSA-N",..: 482 612 549 252 548 418 196 577 41 462 ...
$ CID : Factor w/ 687 levels "1001","10050",..: 686 680 139 15 220 189 483 610 564 468 ...
$ CAS : Factor w/ 677 levels "1001-45-2","10032-13-0",..: 676 591 212 100 379 586 231 114 62 200 ...
$ Kreher.2005.EN : int 16 51 NA NA NA NA NA NA NA NA ...
$ Kreher.2008.EN : int 9 NA NA NA NA NA NA NA NA NA ...
$ Montague.2011.EN: int 5 47 NA NA NA NA NA NA NA NA ...
}
\description{
DoOR response data for responding unit Or85c. Please find detailed
information on the respective sources of the data in door_dataset_info.
}
\keyword{DoOR}
\keyword{Or85c}
\keyword{dataset}
\keyword{responding_unit}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NutsDual.R, R/hmc.R
\name{Leapfrog}
\alias{Leapfrog}
\alias{Leapfrog}
\title{Leapfrog}
\usage{
Leapfrog(theta, r, epsilon, L)
Leapfrog(theta, r, epsilon, L)
}
\arguments{
\item{theta}{starting position}
\item{r}{starting momentum}
\item{epsilon}{step size}
\item{L}{callable function: returns the value of log posterior and the gradient of log posterior probability at given input}
\item{theta}{starting position}
\item{r}{starting momentum}
\item{epsilon}{step size}
\item{L}{callable function: returns the value of log posterior and the gradient of log posterior probability at given input}
}
\value{
the list of updated theta, r and the log posterior value at the updated point
the list of updated theta, r and the log posterior value at the updated point
}
\description{
This function performs a leapfrog step. This function is a modified version of Leapfrog in the paper. It returns extra values: the log posterior value and the gradient of the log posterior at the new position theta.tilde
This function performs a leapfrog step. This function is a modified version of Leapfrog in the paper.
It returns extra values: log posterior value and gradient of log posterior at the new position theta.tilde
}
|
/man/Leapfrog.Rd
|
no_license
|
JingyueLu/NUTS
|
R
| false
| true
| 1,280
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NutsDual.R, R/hmc.R
\name{Leapfrog}
\alias{Leapfrog}
\alias{Leapfrog}
\title{Leapfrog}
\usage{
Leapfrog(theta, r, epsilon, L)
Leapfrog(theta, r, epsilon, L)
}
\arguments{
\item{theta}{starting position}
\item{r}{starting momentum}
\item{epsilon}{step size}
\item{L}{callable function: returns the value of log posterior and the gradient of log posterior probability at given input}
\item{theta}{starting position}
\item{r}{starting momentum}
\item{epsilon}{step size}
\item{L}{callable function: returns the value of log posterior and the gradient of log posterior probability at given input}
}
\value{
the list of updated theta, r and the log posterior value at the updated point
the list of updated theta, r and the log posterior value at the updated point
}
\description{
This function performs a leapfrog step. This function is a modified version of Leapfrog in the paper. It returns extra values: the log posterior value and the gradient of the log posterior at the new position theta.tilde
This function performs a leapfrog step. This function is a modified version of Leapfrog in the paper.
It returns extra values: log posterior value and gradient of log posterior at the new position theta.tilde
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BrainRegion3D.R
\docType{methods}
\name{[,ROIVolume,numeric,missing,ANY-method}
\alias{[,ROIVolume,logical,logical,ANY-method}
\alias{[,ROIVolume,logical,missing,ANY-method}
\alias{[,ROIVolume,missing,logical,ANY-method}
\alias{[,ROIVolume,missing,numeric,ANY-method}
\alias{[,ROIVolume,numeric,missing,ANY-method}
\alias{[,ROIVolume,numeric,numeric,ANY-method}
\title{subset an \code{ROIVolume}}
\usage{
\S4method{[}{ROIVolume,numeric,missing,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,logical,missing,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,numeric,numeric,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,missing,numeric,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,missing,logical,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,logical,logical,ANY}(x, i, j, drop)
}
\arguments{
\item{x}{the object}
\item{i}{first index}
\item{j}{second index}
\item{drop}{drop dimension}
}
\description{
subset an \code{ROIVolume}
}
|
/man/vol_subset-methods.Rd
|
no_license
|
muschellij2/neuroim
|
R
| false
| true
| 999
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BrainRegion3D.R
\docType{methods}
\name{[,ROIVolume,numeric,missing,ANY-method}
\alias{[,ROIVolume,logical,logical,ANY-method}
\alias{[,ROIVolume,logical,missing,ANY-method}
\alias{[,ROIVolume,missing,logical,ANY-method}
\alias{[,ROIVolume,missing,numeric,ANY-method}
\alias{[,ROIVolume,numeric,missing,ANY-method}
\alias{[,ROIVolume,numeric,numeric,ANY-method}
\title{subset an \code{ROIVolume}}
\usage{
\S4method{[}{ROIVolume,numeric,missing,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,logical,missing,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,numeric,numeric,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,missing,numeric,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,missing,logical,ANY}(x, i, j, drop)
\S4method{[}{ROIVolume,logical,logical,ANY}(x, i, j, drop)
}
\arguments{
\item{x}{the object}
\item{i}{first index}
\item{j}{second index}
\item{drop}{drop dimension}
}
\description{
subset an \code{ROIVolume}
}
|
# estimamos con abc
library(circular)
library(Rcpp)
library(tidyverse)
library(mvtnorm)
library(abc)
library(randomForest)
# ==============================================================
# FUNCIONES
# ==============================================================
source('SSM/funaux.R')
Rcpp::sourceCpp('SSM/cpp/abc_crw.cpp')
Rcpp::sourceCpp('SSM/cpp/PathelementsCpp.cpp')
Rcpp::sourceCpp('SSM/cpp/RW_exp_cor.cpp')
Rcpp::sourceCpp('SSM/cpp/cppObs.cpp')
# 1.2 funciones para calcular stat y simular datos
# Simulate one correlated random walk for parameters (w, k) and return the
# observed tracks sub-sampled at each observation interval in dt.list.
simdata <- function(ws, ks, dt.list, nsteps) {
  # Full-resolution path from the compiled CRW simulator.
  path <- cppRW_exp_cor(k = ks, w = ws, ns = nsteps, maxx = Inf)
  names(dt.list) <- paste0('dt_', dt.list)
  # One observed track per sampling interval.
  lapply(dt.list, function(step) {
    cppObs(x = path$x, y = path$y, t = path$t, dt = step)
  })
}
# Compute the vector of summary statistics for one observed dataset.
# (Original note kept: it is not clear what 'nobs' is used for.)
stat_fun <- function( dd ) {
# Path elements (steps, turning angles, absolute directions) of the track.
ps = PathelementsCpp(dd$sx,dd$sy)
# Autocorrelation of the (circular) movement directions.
bb = acf(circular(ps$direction),plot=FALSE)
ct = mean(cos(ps$turns))
st = mean(sin(ps$turns))
bo = sd(ps$steps)#/abs(mean(ps$steps))
# aa = acf(ps$steps,plot=FALSE)
# NOTE(review): '/+' is division by a unary-plus term, i.e. tr2 is a RATIO
# of the mean squared x- and y-displacements, not a sum. If a sum was
# intended this should read '+'; confirm before changing, since the saved
# simulations / pretrained models were produced with the current definition.
tr2 = mean( diff(dd$sx)^2 ) /+ mean( diff(dd$sy)^2 )
r2 = sd(dd$sx)+sd(dd$sy)
# Lag (excluding lag 0) with the largest absolute direction autocorrelation.
mx <- which.max( abs(bb$acf[-1]) )
sale <-c(mean(ps$steps),
sd(ps$turns),
cdt2(ps$steps,dd$st[2:length(dd$st)]),
sd(ps$steps),
mean(bb$acf[2:6]),
bb$acf[mx+1],
sqrt((mean(cos(ps$turns)))^2+(mean(sin(ps$turns)))^2),
it(ps$steps,dd$sx,dd$sy),
si(ct,st,mean(ps$steps),bo),
tr2
)
# r2 and mean(bb$acf) are not used.
# Correlation at the initial lags and the lag of max correlation are included.
return(sale)
}
#stat_fun(oz)
# Simulate one dataset at parameter vector par = c(w, k) and compute the
# summary statistics for each observation interval in dt.list.
f1 <- function( par, dt.list, ns ) {
  sims <- simdata(ws = par[1], ks = par[2], dt.list = dt.list, nsteps = ns)
  lapply(sims, stat_fun)
}
# ==============================================================
# A single 'real' dataset, observed at 3 sampling intervals dt = .05, .5, 5.
# Careful: nsteps and dt.list must match the values that were used
# to generate the stored simulations (statsim_*.rds below).
t_w <- 2
t_k <- 20
data.obs <- simdata(ws=t_w, ks=t_k, dt.list = list(.05, .5, 5), nsteps=800)
# Observed summary statistics reshaped to one row per dt, columns T1..T10.
stat.obs <- lapply(data.obs, stat_fun) %>%
bind_rows() %>%
mutate( stat.nm = paste('T', 1:10, sep='') ) %>%
gather(dt, stat.val, starts_with('dt') ) %>%
spread(stat.nm, stat.val)
# Fit two ABC estimates for one dt-slice of the simulated reference table:
#  - using the raw summary statistics
#  - using a random-forest transformation of the summary statistics
abc_fn <- function(d) {
# First fit the RF models mapping statistics -> parameters.
rf.w <- randomForest( s_w ~ . , data = dplyr::select(d, -s_k, -dt) )
rf.k <- randomForest( s_k ~ . , data = dplyr::select(d, -s_w,-dt) )
rf.stsim <- data_frame(s_w = predict(rf.w), s_k = predict(rf.k) )
# Observed statistics for the same sampling interval as this slice.
stobs = stat.obs %>% filter(dt == d$dt[1])
rf.stat.obs <- c( predict(rf.w, newdata = stobs ), predict(rf.k, newdata = stobs ) )
# ABC using the raw summary statistics.
# NOTE(review): columns 1:2 are assumed to be (s_w, s_k) and 4:13 the ten
# statistics - confirm against the structure of statsim_*.rds.
out <- abc(target = as.numeric(stobs[-1]),
param = d[, 1:2],
sumstat = d[, 4:13] , tol = .5,
method='neuralnet')
# ABC using the RF-transformed summary statistics.
out.rf <- abc(target = rf.stat.obs,
param = d[, 1:2],
sumstat = rf.stsim , tol = .5,
method='neuralnet')
list(out, out.rf)
}
# Two priors (uniform / normal); for each we run ABC with the raw summary
# statistics and with the RF transformation of the statistics.
statsim.unif <- readRDS('SSM/statsim_unif.rds')
statsim.norm <- readRDS('SSM/statsim_norm.rds')
out.unif <- statsim.unif %>%
split.data.frame( factor(statsim.unif$dt) ) %>%
lapply( abc_fn )
out.norm <- statsim.norm %>%
split.data.frame( factor(statsim.norm$dt) ) %>%
lapply( abc_fn )
saveRDS(out.unif, file='SSM/out_unif.rds')
saveRDS(out.norm, file='SSM/out_norm.rds')
|
/SSM/nacho_estima.R
|
no_license
|
sofiar/SSM
|
R
| false
| false
| 3,966
|
r
|
# estimamos con abc
library(circular)
library(Rcpp)
library(tidyverse)
library(mvtnorm)
library(abc)
library(randomForest)
# ==============================================================
# FUNCIONES
# ==============================================================
source('SSM/funaux.R')
Rcpp::sourceCpp('SSM/cpp/abc_crw.cpp')
Rcpp::sourceCpp('SSM/cpp/PathelementsCpp.cpp')
Rcpp::sourceCpp('SSM/cpp/RW_exp_cor.cpp')
Rcpp::sourceCpp('SSM/cpp/cppObs.cpp')
# 1.2 funciones para calcular stat y simular datos
# Simulate one correlated random walk for parameters (w, k) and return the
# observed tracks sub-sampled at each observation interval in dt.list.
simdata <- function(ws, ks, dt.list, nsteps) {
  # Full-resolution path from the compiled CRW simulator.
  path <- cppRW_exp_cor(k = ks, w = ws, ns = nsteps, maxx = Inf)
  names(dt.list) <- paste0('dt_', dt.list)
  # One observed track per sampling interval.
  lapply(dt.list, function(step) {
    cppObs(x = path$x, y = path$y, t = path$t, dt = step)
  })
}
# Compute the vector of summary statistics for one observed dataset.
# (Original note kept: it is not clear what 'nobs' is used for.)
stat_fun <- function( dd ) {
# Path elements (steps, turning angles, absolute directions) of the track.
ps = PathelementsCpp(dd$sx,dd$sy)
# Autocorrelation of the (circular) movement directions.
bb = acf(circular(ps$direction),plot=FALSE)
ct = mean(cos(ps$turns))
st = mean(sin(ps$turns))
bo = sd(ps$steps)#/abs(mean(ps$steps))
# aa = acf(ps$steps,plot=FALSE)
# NOTE(review): '/+' is division by a unary-plus term, i.e. tr2 is a RATIO
# of the mean squared x- and y-displacements, not a sum. If a sum was
# intended this should read '+'; confirm before changing, since the saved
# simulations / pretrained models were produced with the current definition.
tr2 = mean( diff(dd$sx)^2 ) /+ mean( diff(dd$sy)^2 )
r2 = sd(dd$sx)+sd(dd$sy)
# Lag (excluding lag 0) with the largest absolute direction autocorrelation.
mx <- which.max( abs(bb$acf[-1]) )
sale <-c(mean(ps$steps),
sd(ps$turns),
cdt2(ps$steps,dd$st[2:length(dd$st)]),
sd(ps$steps),
mean(bb$acf[2:6]),
bb$acf[mx+1],
sqrt((mean(cos(ps$turns)))^2+(mean(sin(ps$turns)))^2),
it(ps$steps,dd$sx,dd$sy),
si(ct,st,mean(ps$steps),bo),
tr2
)
# r2 and mean(bb$acf) are not used.
# Correlation at the initial lags and the lag of max correlation are included.
return(sale)
}
#stat_fun(oz)
# Simulate one dataset at parameter vector par = c(w, k) and compute the
# summary statistics for each observation interval in dt.list.
f1 <- function( par, dt.list, ns ) {
  sims <- simdata(ws = par[1], ks = par[2], dt.list = dt.list, nsteps = ns)
  lapply(sims, stat_fun)
}
# ==============================================================
# A single 'real' dataset, observed at 3 sampling intervals dt = .05, .5, 5.
# Careful: nsteps and dt.list must match the values that were used
# to generate the stored simulations (statsim_*.rds below).
t_w <- 2
t_k <- 20
data.obs <- simdata(ws=t_w, ks=t_k, dt.list = list(.05, .5, 5), nsteps=800)
# Observed summary statistics reshaped to one row per dt, columns T1..T10.
stat.obs <- lapply(data.obs, stat_fun) %>%
bind_rows() %>%
mutate( stat.nm = paste('T', 1:10, sep='') ) %>%
gather(dt, stat.val, starts_with('dt') ) %>%
spread(stat.nm, stat.val)
# Fit two ABC estimates for one dt-slice of the simulated reference table:
#  - using the raw summary statistics
#  - using a random-forest transformation of the summary statistics
abc_fn <- function(d) {
# First fit the RF models mapping statistics -> parameters.
rf.w <- randomForest( s_w ~ . , data = dplyr::select(d, -s_k, -dt) )
rf.k <- randomForest( s_k ~ . , data = dplyr::select(d, -s_w,-dt) )
rf.stsim <- data_frame(s_w = predict(rf.w), s_k = predict(rf.k) )
# Observed statistics for the same sampling interval as this slice.
stobs = stat.obs %>% filter(dt == d$dt[1])
rf.stat.obs <- c( predict(rf.w, newdata = stobs ), predict(rf.k, newdata = stobs ) )
# ABC using the raw summary statistics.
# NOTE(review): columns 1:2 are assumed to be (s_w, s_k) and 4:13 the ten
# statistics - confirm against the structure of statsim_*.rds.
out <- abc(target = as.numeric(stobs[-1]),
param = d[, 1:2],
sumstat = d[, 4:13] , tol = .5,
method='neuralnet')
# ABC using the RF-transformed summary statistics.
out.rf <- abc(target = rf.stat.obs,
param = d[, 1:2],
sumstat = rf.stsim , tol = .5,
method='neuralnet')
list(out, out.rf)
}
# Two priors (uniform / normal); for each we run ABC with the raw summary
# statistics and with the RF transformation of the statistics.
statsim.unif <- readRDS('SSM/statsim_unif.rds')
statsim.norm <- readRDS('SSM/statsim_norm.rds')
out.unif <- statsim.unif %>%
split.data.frame( factor(statsim.unif$dt) ) %>%
lapply( abc_fn )
out.norm <- statsim.norm %>%
split.data.frame( factor(statsim.norm$dt) ) %>%
lapply( abc_fn )
saveRDS(out.unif, file='SSM/out_unif.rds')
saveRDS(out.norm, file='SSM/out_norm.rds')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Laplace.R
\name{buildLaplace}
\alias{buildLaplace}
\alias{laplace}
\alias{Laplace}
\title{Laplace approximation}
\usage{
buildLaplace(
model,
paramNodes,
randomEffectsNodes,
calcNodes,
calcNodesOther,
control = list()
)
}
\arguments{
\item{model}{a NIMBLE model object, such as returned by \code{nimbleModel}.
The model must have automatic derivatives (AD) turned on, e.g. by using
\code{buildDerivs=TRUE} in \code{nimbleModel}.}
\item{paramNodes}{a character vector of names of parameter nodes in the
model; defaults are provided by \code{\link{setupMargNodes}}.
Alternatively, \code{paramNodes} can be a list in the format returned by
\code{setupMargNodes}, in which case \code{randomEffectsNodes},
\code{calcNodes}, and \code{calcNodesOther} are not needed (and will be
ignored).}
\item{randomEffectsNodes}{a character vector of names of continuous unobserved
(latent) nodes to marginalize (integrate) over using Laplace approximation;
defaults are provided by \code{\link{setupMargNodes}}.}
\item{calcNodes}{a character vector of names of nodes for calculating the
integrand for Laplace approximation; defaults are provided by
\code{\link{setupMargNodes}}. There may be deterministic nodes between
\code{paramNodes} and \code{calcNodes}. These will be included in
calculations automatically and thus do not need to be included in
\code{calcNodes} (but there is no problem if they are).}
\item{calcNodesOther}{a character vector of names of nodes for calculating
terms in the log-likelihood that do not depend on any
\code{randomEffectsNodes}, and thus are not part of the marginalization,
but should be included for purposes of finding the MLE. This defaults to
stochastic nodes that depend on \code{paramNodes} but are not part of and
do not depend on \code{randomEffectsNodes}. There may be deterministic
nodes between \code{paramNodes} and \code{calcNodesOther}. These will be
included in calculations automatically and thus do not need to be included
in \code{calcNodesOther} (but there is no problem if they are).}
\item{control}{a named list for providing additional settings used in Laplace
approximation. See \code{control} section below.}
}
\description{
Build a Laplace approximation algorithm for a given NIMBLE model.
}
\section{\code{buildLaplace}}{
\code{buildLaplace} is the main function for constructing the Laplace
approximation for a given model or part of a model.
See method \code{summary} below and the separate function
\code{\link{summaryLaplace}} for processing maximum likelihood estimates
obtained by method \code{findMLE} below.
Any of the input node vectors, when provided, will be processed using
\code{nodes <- model$expandNodeNames(nodes)}, where \code{nodes} may be
\code{paramNodes}, \code{randomEffectsNodes}, and so on. This step allows
any of the inputs to include node-name-like syntax that might contain
multiple nodes. For example, \code{paramNodes = 'beta[1:10]'} can be
provided if there are actually 10 scalar parameters, 'beta[1]' through
'beta[10]'. The actual node names in the model will be determined by the
\code{expandNodeNames} step.
In many (but not all) cases, one only needs to provide a NIMBLE model object
and then the function will construct reasonable defaults necessary for
Laplace approximation to marginalize over all continuous latent states
(aka random effects) in a model. The default values for the four groups of
nodes are obtained by calling \code{\link{setupMargNodes}}, whose arguments
match those here (except for a few arguments which are taken from control
list elements here).
\code{setupMargNodes} tries to give sensible defaults from
any combination of \code{paramNodes}, \code{randomEffectsNodes},
\code{calcNodes}, and \code{calcNodesOther} that are provided. For example,
if you provide only \code{randomEffectsNodes} (perhaps you want to
marginalize over only some of the random effects in your model),
\code{setupMargNodes} will try to determine appropriate choices for the
others.
These defaults make general assumptions such as that
\code{randomEffectsNodes} have \code{paramNodes} as parents. However, The
steps for determining defaults are not simple, and it is possible that they
will be refined in the future. It is also possible that they simply don't
give what you want for a particular model. One example where they will not
give desired results can occur when random effects have no prior
parameters, such as `N(0,1)` nodes that will be multiplied by a scale
factor (e.g. sigma) and added to other explanatory terms in a model. Such
nodes look like top-level parameters in terms of model structure, so
you must provide a \code{randomEffectsNodes} argument to indicate which
they are.
It can be helpful to use \code{setupMargNodes} directly to see exactly how
nodes will be arranged for Laplace approximation. For example, you may want
to verify the choice of \code{randomEffectsNodes} or get the order of
parameters it has established to use for making sense of the MLE and
results from the \code{summary} method. One can also call
\code{setupMargNodes}, customize the returned list, and then provide that
to \code{buildLaplace} as \code{paramNodes}. In that case,
\code{setupMargNodes} will not be called (again) by \code{buildLaplace}.
If \code{setupMargNodes} is emitting an unnecessary warning, simply use
\code{control=list(check=FALSE)}.
If any \code{paramNodes} (parameters) or \code{randomEffectsNodes} (random
effects / latent states) have constraints on the range of valid values
(because of the distribution they follow), they will be used on a
transformed scale determined by \code{parameterTransform}. This means the
Laplace approximation itself will be done on the transformed scale for
random effects and finding the MLE will be done on the transformed scale
for parameters. For parameters, prior distributions are not included in
calculations, but they are used to determine valid parameter ranges. For
example, if \code{sigma} is a standard deviation, you can declare it with a
prior such as \code{sigma ~ dhalfflat()} to indicate that it must be
greater than 0.
For default determination of parameters, all parameters must have a prior
distribution simply to indicate the range of valid values. For a param
\code{p} that has no constraint, a simple choice is \code{p ~ dflat()}.
The object returned by \code{buildLaplace} is a nimbleFunction object with
numerous methods (functions). The most useful ones are:
\itemize{
\item \code{calcLogLik(p, trans)}. Calculate the Laplace approximation to
the marginal log-likelihood function at parameter value \code{p}, which
(if \code{trans} is FALSE, which is the default) should match the order
of \code{paramNodes}. For any non-scalar nodes in \code{paramNodes},
the order within the node is column-major (which can be seen for R
objects using \code{as.numeric}). Return value is the scalar
(approximate, marginal) log likelihood.
If \code{trans} is TRUE, then \code{p} is the vector of parameters on
the transformed scale, if any, described above. In this case, the
parameters on the original scale (as the model was written) will be
determined by calling the method \code{pInverseTransform(p)}. Note that
the length of the parameter vector on the transformed scale might not
be the same as on the original scale (because some constraints of
non-scalar parameters result in fewer free transformed parameters than
original parameters).
\item \code{calcLaplace(p, trans)}. This is the same as \code{calcLogLik}.
\item \code{findMLE(pStart, method, hessian)}. Find the maximum likelihood
estimates of parameters using the Laplace-approximated marginal
likelihood. Arguments include \code{pStart}: initial parameter values
(defaults to parameter values currently in the model);
\code{method}: (outer) optimization method to use in \code{optim}
(defaults to "BFGS"); and
\code{hessian}: whether to calculate and return the Hessian matrix
(defaults to \code{TRUE}). Second derivatives in the Hessian are
determined by finite differences of the gradients obtained by
automatic differentiation (AD). Return value is a nimbleList of type
\code{optimResultNimbleList}, similar to what is returned by R's
optim. See \code{help(nimOptim)}.
\item \code{summary(MLEoutput, originalScale, randomEffectsStdError,
jointCovariance)}. Summarize the maximum likelihood estimation
results, given object \code{MLEoutput} that was returned by
\code{findMLE}. The summary can include a covariance matrix for the
        parameters, the random effects, or both,
and these can be returned on the original parameter scale or on the
(potentially) transformed scale(s) used in estimation.
In more detail, \code{summary} accepts the following optional arguments:
\itemize{
\item \code{originalScale}. Logical. If TRUE, the function returns
results on the original scale(s) of parameters and random effects;
otherwise, it returns results on the transformed scale(s). If there
are no constraints, the two scales are identical. Defaults to TRUE.
\item \code{randomEffectsStdError}. Logical. If TRUE, standard
errors of random effects will be calculated.
Defaults to FALSE.
\item \code{jointCovariance}. Logical. If TRUE, the joint
variance-covariance matrix of the parameters and the random effects
will be returned. If FALSE, the variance-covariance matrix of the
parameters will be returned. Defaults to FALSE.
}
The object returned by \code{summary} is a nimbleList with elements:
\itemize{
\item \code{params}. A list that contains estimates and standard
errors of parameters (on the original or transformed scale, as
chosen by \code{originalScale}).
\item \code{randomEffects}. A list that contains estimates of random
effects and, if requested (\code{randomEffectsStdError=TRUE})
their standard errors, on original or transformed scale. Standard
errors are calculated following the generalized delta method of
Kass and Steffey (1989).
\item \code{vcov}. If requested (i.e.
\code{jointCovariance=TRUE}), the joint variance-covariance
matrix of the parameters and random effects, on original or
transformed scale. If \code{jointCovariance=FALSE}, the
covariance matrix of the parameters, on original or transformed
scale.
\item \code{scale}. \code{"original"} or \code{"transformed"}, the
scale on which results were requested.
}
}
Additional methods to access or control more details of the Laplace approximation include:
\itemize{
\item \code{getNodeNamesVec(returnParams)}. Return a vector (>1) of names
of parameters/random effects nodes, according to \code{returnParams =
TRUE/FALSE}. Use this if there is more than one node.
\item \code{getNodeNameSingle(returnParams)}. Return the name of a
single parameter/random effect node, according to \code{returnParams =
TRUE/FALSE}. Use this if there is only one node.
\item \code{setMethod(method)}. Set method ID for calculating the Laplace
approximation and gradient: 1 (\code{Laplace1}), 2 (\code{Laplace2},
default method), or 3 (\code{Laplace3}). See below for more details. Users
wanting to explore efficiency can try switching from method 2 (default) to
methods 1 or 3 and comparing performance. The first Laplace approximation
with each method will be (much) slower than subsequent Laplace
approximations.
\item \code{getMethod()}. Return the current method ID for Laplace.
\item \code{gr_logLik(p, trans)}. Gradient of the Laplace-approximated
marginal log-likelihood at parameter value \code{p}. Argument \code{trans}
is similar to that in \code{calcLaplace}. If there are multiple parameters,
the vector \code{p} is given in the order of parameter names returned by
\code{getNodeNamesVec(returnParams=TRUE)}.
\item \code{gr_Laplace(p, trans)}. This is the same as \code{gr_logLik}.
\item \code{otherLogLik(p)}. Calculate the \code{calcNodesOther}
nodes, which returns the log-likelihood of the parts of the model that are
not included in the Laplace approximation.
\item \code{gr_otherLogLik(p)}. Gradient (vector of derivatives with
respect to each parameter) of \code{otherLogLik(p)}. Results should
match \code{gr_otherLogLik_internal(p)} but may be more efficient after
the first call.
}
Finally, methods that are primarily for internal use by other methods include:
\itemize{
\item \code{p_transformed_gr_Laplace(pTransform)}. Gradient of the Laplace
approximation (\code{p_transformed_Laplace(pTransform)}) at transformed
(unconstrained) parameter value \code{pTransform}.
\item \code{pInverseTransform(pTransform)}. Back-transform the transformed
parameter value \code{pTransform} to original scale.
\item \code{derivs_pInverseTransform(pTransform, order)}. Derivatives of
the back-transformation (i.e. inverse of parameter transformation) with
respect to transformed parameters at \code{pTransform}. Derivative order
is given by \code{order} (any of 0, 1, and/or 2).
\item \code{reInverseTransform(reTrans)}. Back-transform the transformed
random effects value \code{reTrans} to original scale.
\item \code{derivs_reInverseTransform(reTrans, order)}. Derivatives of the
back-transformation (i.e. inverse of random effects transformation) with
respect to transformed random effects at \code{reTrans}. Derivative order
is given by \code{order} (any of 0, 1, and/or 2).
\item \code{optimRandomEffects(pTransform)}. Calculate the optimized
random effects given transformed parameter value \code{pTransform}. The
optimized random effects are the mode of the conditional distribution of
random effects given data at parameters \code{pTransform}, i.e. the
calculation of \code{calcNodes}.
\item \code{inverse_negHess(p, reTransform)}. Calculate the inverse of the
negative Hessian matrix of the joint (parameters and random effects)
log-likelihood with respect to transformed random effects, evaluated at
parameter value \code{p} and transformed random effects
\code{reTransform}.
\item \code{hess_logLik_wrt_p_wrt_re(p, reTransform)}. Calculate the
Hessian matrix of the joint log-likelihood with respect to parameters and
transformed random effects, evaluated at parameter value \code{p} and
transformed random effects \code{reTransform}.
  \item \code{one_time_fixes()}. Users never need to run this. It is called
when necessary internally to fix dimensionality issues if there is only
one parameter in the model.
\item \code{p_transformed_Laplace(pTransform)}. Laplace approximation at
transformed (unconstrained) parameter value \code{pTransform}. To
make maximizing the Laplace likelihood unconstrained, an automated
transformation via \code{\link{parameterTransform}} is performed on
any parameters with constraints indicated by their priors (even
though the prior probabilities are not used).
\item \code{gr_otherLogLik_internal(p)}. Gradient (vector of
derivatives with respect to each parameter) of \code{otherLogLik(p)}.
This is obtained using automatic differentiation (AD) with single-taping.
First call will always be slower than later calls.
}
}
\section{\code{control} list}{
\code{buildLaplace} accepts the following control list elements:
\itemize{
\item \code{split}. If TRUE (default), \code{randomEffectsNodes} will be
split into conditionally independent sets if possible. This
facilitates more efficient Laplace approximation because each
conditionally independent set can be marginalized independently. If
FALSE, \code{randomEffectsNodes} will be handled as one multivariate
block, with one multivariate Laplace approximation. If \code{split}
is a numeric vector, \code{randomEffectsNodes} will be split by
\code{split}(\code{randomEffectsNodes}, \code{control$split}). The
last option allows arbitrary control over how
\code{randomEffectsNodes} are blocked.
\item \code{check}. If TRUE (default), a warning is issued if
\code{paramNodes}, \code{randomEffectsNodes} and/or \code{calcNodes}
        are provided but seem to have missing elements or unnecessary
elements based on some default inspection of the model. If
unnecessary warnings are emitted, simply set \code{check=FALSE}.
\item \code{innerOptimControl}. See \code{optimControl}.
\item \code{innerOptimMethod}. See \code{optimMethod}.
  \item \code{innerOptimStart}. See \code{optimStart}.
\item \code{outOptimControl}. A list of control parameters for maximizing
the Laplace log-likelihood using \code{optim}. See 'Details' of
\code{\link{optim}} for further information.
}
}
\examples{
pumpCode <- nimbleCode({
for (i in 1:N){
theta[i] ~ dgamma(alpha, beta)
lambda[i] <- theta[i] * t[i]
x[i] ~ dpois(lambda[i])
}
alpha ~ dexp(1.0)
beta ~ dgamma(0.1, 1.0)
})
pumpConsts <- list(N = 10, t = c(94.3, 15.7, 62.9, 126, 5.24, 31.4, 1.05, 1.05, 2.1, 10.5))
pumpData <- list(x = c(5, 1, 5, 14, 3, 19, 1, 1, 4, 22))
pumpInits <- list(alpha = 0.1, beta = 0.1, theta = rep(0.1, pumpConsts$N))
pump <- nimbleModel(code = pumpCode, name = "pump", constants = pumpConsts,
data = pumpData, inits = pumpInits, buildDerivs = TRUE)
# Build Laplace approximation
pumpLaplace <- buildLaplace(pump)
\dontrun{
# Compile the model
Cpump <- compileNimble(pump)
CpumpLaplace <- compileNimble(pumpLaplace, project = pump)
# Calculate MLEs of parameters
MLEres <- CpumpLaplace$findMLE()
# Calculate estimates and standard errors for parameters and random effects on original scale
allres <- CpumpLaplace$summary(MLEres, randomEffectsStdError = TRUE)
}
}
\references{
Kass, R. and Steffey, D. (1989). Approximate Bayesian inference in
conditionally independent hierarchical models (parametric empirical Bayes
models). \emph{Journal of the American Statistical Association}, 84(407),
717–726.
Skaug, H. and Fournier, D. (2006). Automatic approximation of the marginal
likelihood in non-Gaussian hierarchical models. \emph{Computational
Statistics & Data Analysis}, 56, 699–709.
}
\author{
Wei Zhang, Perry de Valpine
}
|
/packages/nimble/man/laplace.Rd
|
permissive
|
nimble-dev/nimble
|
R
| false
| true
| 18,869
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Laplace.R
\name{buildLaplace}
\alias{buildLaplace}
\alias{laplace}
\alias{Laplace}
\title{Laplace approximation}
\usage{
buildLaplace(
model,
paramNodes,
randomEffectsNodes,
calcNodes,
calcNodesOther,
control = list()
)
}
\arguments{
\item{model}{a NIMBLE model object, such as returned by \code{nimbleModel}.
The model must have automatic derivatives (AD) turned on, e.g. by using
\code{buildDerivs=TRUE} in \code{nimbleModel}.}
\item{paramNodes}{a character vector of names of parameter nodes in the
model; defaults are provided by \code{\link{setupMargNodes}}.
Alternatively, \code{paramNodes} can be a list in the format returned by
\code{setupMargNodes}, in which case \code{randomEffectsNodes},
\code{calcNodes}, and \code{calcNodesOther} are not needed (and will be
ignored).}
\item{randomEffectsNodes}{a character vector of names of continuous unobserved
(latent) nodes to marginalize (integrate) over using Laplace approximation;
defaults are provided by \code{\link{setupMargNodes}}.}
\item{calcNodes}{a character vector of names of nodes for calculating the
integrand for Laplace approximation; defaults are provided by
\code{\link{setupMargNodes}}. There may be deterministic nodes between
\code{paramNodes} and \code{calcNodes}. These will be included in
calculations automatically and thus do not need to be included in
\code{calcNodes} (but there is no problem if they are).}
\item{calcNodesOther}{a character vector of names of nodes for calculating
terms in the log-likelihood that do not depend on any
\code{randomEffectsNodes}, and thus are not part of the marginalization,
but should be included for purposes of finding the MLE. This defaults to
stochastic nodes that depend on \code{paramNodes} but are not part of and
do not depend on \code{randomEffectsNodes}. There may be deterministic
nodes between \code{paramNodes} and \code{calcNodesOther}. These will be
included in calculations automatically and thus do not need to be included
in \code{calcNodesOther} (but there is no problem if they are).}
\item{control}{a named list for providing additional settings used in Laplace
approximation. See \code{control} section below.}
}
\description{
Build a Laplace approximation algorithm for a given NIMBLE model.
}
\section{\code{buildLaplace}}{
\code{buildLaplace} is the main function for constructing the Laplace
approximation for a given model or part of a model.
See method \code{summary} below and the separate function
\code{\link{summaryLaplace}} for processing maximum likelihood estimates
obtained by method \code{findMLE} below.
Any of the input node vectors, when provided, will be processed using
\code{nodes <- model$expandNodeNames(nodes)}, where \code{nodes} may be
\code{paramNodes}, \code{randomEffectsNodes}, and so on. This step allows
any of the inputs to include node-name-like syntax that might contain
multiple nodes. For example, \code{paramNodes = 'beta[1:10]'} can be
provided if there are actually 10 scalar parameters, 'beta[1]' through
'beta[10]'. The actual node names in the model will be determined by the
\code{expandNodeNames} step.
In many (but not all) cases, one only needs to provide a NIMBLE model object
and then the function will construct reasonable defaults necessary for
Laplace approximation to marginalize over all continuous latent states
(aka random effects) in a model. The default values for the four groups of
nodes are obtained by calling \code{\link{setupMargNodes}}, whose arguments
match those here (except for a few arguments which are taken from control
list elements here).
\code{setupMargNodes} tries to give sensible defaults from
any combination of \code{paramNodes}, \code{randomEffectsNodes},
\code{calcNodes}, and \code{calcNodesOther} that are provided. For example,
if you provide only \code{randomEffectsNodes} (perhaps you want to
marginalize over only some of the random effects in your model),
\code{setupMargNodes} will try to determine appropriate choices for the
others.
These defaults make general assumptions such as that
\code{randomEffectsNodes} have \code{paramNodes} as parents. However, the
steps for determining defaults are not simple, and it is possible that they
will be refined in the future. It is also possible that they simply don't
give what you want for a particular model. One example where they will not
give desired results can occur when random effects have no prior
parameters, such as `N(0,1)` nodes that will be multiplied by a scale
factor (e.g. sigma) and added to other explanatory terms in a model. Such
nodes look like top-level parameters in terms of model structure, so
you must provide a \code{randomEffectsNodes} argument to indicate which
they are.
It can be helpful to use \code{setupMargNodes} directly to see exactly how
nodes will be arranged for Laplace approximation. For example, you may want
to verify the choice of \code{randomEffectsNodes} or get the order of
parameters it has established to use for making sense of the MLE and
results from the \code{summary} method. One can also call
\code{setupMargNodes}, customize the returned list, and then provide that
to \code{buildLaplace} as \code{paramNodes}. In that case,
\code{setupMargNodes} will not be called (again) by \code{buildLaplace}.
If \code{setupMargNodes} is emitting an unnecessary warning, simply use
\code{control=list(check=FALSE)}.
If any \code{paramNodes} (parameters) or \code{randomEffectsNodes} (random
effects / latent states) have constraints on the range of valid values
(because of the distribution they follow), they will be used on a
transformed scale determined by \code{parameterTransform}. This means the
Laplace approximation itself will be done on the transformed scale for
random effects and finding the MLE will be done on the transformed scale
for parameters. For parameters, prior distributions are not included in
calculations, but they are used to determine valid parameter ranges. For
example, if \code{sigma} is a standard deviation, you can declare it with a
prior such as \code{sigma ~ dhalfflat()} to indicate that it must be
greater than 0.
For default determination of parameters, all parameters must have a prior
distribution simply to indicate the range of valid values. For a param
\code{p} that has no constraint, a simple choice is \code{p ~ dflat()}.
The object returned by \code{buildLaplace} is a nimbleFunction object with
numerous methods (functions). The most useful ones are:
\itemize{
\item \code{calcLogLik(p, trans)}. Calculate the Laplace approximation to
the marginal log-likelihood function at parameter value \code{p}, which
(if \code{trans} is FALSE, which is the default) should match the order
of \code{paramNodes}. For any non-scalar nodes in \code{paramNodes},
the order within the node is column-major (which can be seen for R
objects using \code{as.numeric}). Return value is the scalar
(approximate, marginal) log likelihood.
If \code{trans} is TRUE, then \code{p} is the vector of parameters on
the transformed scale, if any, described above. In this case, the
parameters on the original scale (as the model was written) will be
determined by calling the method \code{pInverseTransform(p)}. Note that
the length of the parameter vector on the transformed scale might not
be the same as on the original scale (because some constraints of
non-scalar parameters result in fewer free transformed parameters than
original parameters).
\item \code{calcLaplace(p, trans)}. This is the same as \code{calcLogLik}.
\item \code{findMLE(pStart, method, hessian)}. Find the maximum likelihood
estimates of parameters using the Laplace-approximated marginal
likelihood. Arguments include \code{pStart}: initial parameter values
(defaults to parameter values currently in the model);
\code{method}: (outer) optimization method to use in \code{optim}
(defaults to "BFGS"); and
\code{hessian}: whether to calculate and return the Hessian matrix
(defaults to \code{TRUE}). Second derivatives in the Hessian are
determined by finite differences of the gradients obtained by
automatic differentiation (AD). Return value is a nimbleList of type
\code{optimResultNimbleList}, similar to what is returned by R's
optim. See \code{help(nimOptim)}.
\item \code{summary(MLEoutput, originalScale, randomEffectsStdError,
jointCovariance)}. Summarize the maximum likelihood estimation
results, given object \code{MLEoutput} that was returned by
\code{findMLE}. The summary can include a covariance matrix for the
        parameters, the random effects, or both,
and these can be returned on the original parameter scale or on the
(potentially) transformed scale(s) used in estimation.
In more detail, \code{summary} accepts the following optional arguments:
\itemize{
\item \code{originalScale}. Logical. If TRUE, the function returns
results on the original scale(s) of parameters and random effects;
otherwise, it returns results on the transformed scale(s). If there
are no constraints, the two scales are identical. Defaults to TRUE.
\item \code{randomEffectsStdError}. Logical. If TRUE, standard
errors of random effects will be calculated.
Defaults to FALSE.
\item \code{jointCovariance}. Logical. If TRUE, the joint
variance-covariance matrix of the parameters and the random effects
will be returned. If FALSE, the variance-covariance matrix of the
parameters will be returned. Defaults to FALSE.
}
The object returned by \code{summary} is a nimbleList with elements:
\itemize{
\item \code{params}. A list that contains estimates and standard
errors of parameters (on the original or transformed scale, as
chosen by \code{originalScale}).
\item \code{randomEffects}. A list that contains estimates of random
effects and, if requested (\code{randomEffectsStdError=TRUE})
their standard errors, on original or transformed scale. Standard
errors are calculated following the generalized delta method of
Kass and Steffey (1989).
\item \code{vcov}. If requested (i.e.
\code{jointCovariance=TRUE}), the joint variance-covariance
matrix of the parameters and random effects, on original or
transformed scale. If \code{jointCovariance=FALSE}, the
covariance matrix of the parameters, on original or transformed
scale.
\item \code{scale}. \code{"original"} or \code{"transformed"}, the
scale on which results were requested.
}
}
Additional methods to access or control more details of the Laplace approximation include:
\itemize{
\item \code{getNodeNamesVec(returnParams)}. Return a vector (>1) of names
of parameters/random effects nodes, according to \code{returnParams =
TRUE/FALSE}. Use this if there is more than one node.
\item \code{getNodeNameSingle(returnParams)}. Return the name of a
single parameter/random effect node, according to \code{returnParams =
TRUE/FALSE}. Use this if there is only one node.
\item \code{setMethod(method)}. Set method ID for calculating the Laplace
approximation and gradient: 1 (\code{Laplace1}), 2 (\code{Laplace2},
default method), or 3 (\code{Laplace3}). See below for more details. Users
wanting to explore efficiency can try switching from method 2 (default) to
methods 1 or 3 and comparing performance. The first Laplace approximation
with each method will be (much) slower than subsequent Laplace
approximations.
\item \code{getMethod()}. Return the current method ID for Laplace.
\item \code{gr_logLik(p, trans)}. Gradient of the Laplace-approximated
marginal log-likelihood at parameter value \code{p}. Argument \code{trans}
is similar to that in \code{calcLaplace}. If there are multiple parameters,
the vector \code{p} is given in the order of parameter names returned by
\code{getNodeNamesVec(returnParams=TRUE)}.
\item \code{gr_Laplace(p, trans)}. This is the same as \code{gr_logLik}.
\item \code{otherLogLik(p)}. Calculate the \code{calcNodesOther}
nodes, which returns the log-likelihood of the parts of the model that are
not included in the Laplace approximation.
\item \code{gr_otherLogLik(p)}. Gradient (vector of derivatives with
respect to each parameter) of \code{otherLogLik(p)}. Results should
match \code{gr_otherLogLik_internal(p)} but may be more efficient after
the first call.
}
Finally, methods that are primarily for internal use by other methods include:
\itemize{
\item \code{p_transformed_gr_Laplace(pTransform)}. Gradient of the Laplace
approximation (\code{p_transformed_Laplace(pTransform)}) at transformed
(unconstrained) parameter value \code{pTransform}.
\item \code{pInverseTransform(pTransform)}. Back-transform the transformed
parameter value \code{pTransform} to original scale.
\item \code{derivs_pInverseTransform(pTransform, order)}. Derivatives of
the back-transformation (i.e. inverse of parameter transformation) with
respect to transformed parameters at \code{pTransform}. Derivative order
is given by \code{order} (any of 0, 1, and/or 2).
\item \code{reInverseTransform(reTrans)}. Back-transform the transformed
random effects value \code{reTrans} to original scale.
\item \code{derivs_reInverseTransform(reTrans, order)}. Derivatives of the
back-transformation (i.e. inverse of random effects transformation) with
respect to transformed random effects at \code{reTrans}. Derivative order
is given by \code{order} (any of 0, 1, and/or 2).
\item \code{optimRandomEffects(pTransform)}. Calculate the optimized
random effects given transformed parameter value \code{pTransform}. The
optimized random effects are the mode of the conditional distribution of
random effects given data at parameters \code{pTransform}, i.e. the
calculation of \code{calcNodes}.
\item \code{inverse_negHess(p, reTransform)}. Calculate the inverse of the
negative Hessian matrix of the joint (parameters and random effects)
log-likelihood with respect to transformed random effects, evaluated at
parameter value \code{p} and transformed random effects
\code{reTransform}.
\item \code{hess_logLik_wrt_p_wrt_re(p, reTransform)}. Calculate the
Hessian matrix of the joint log-likelihood with respect to parameters and
transformed random effects, evaluated at parameter value \code{p} and
transformed random effects \code{reTransform}.
  \item \code{one_time_fixes()}. Users never need to run this. It is called
when necessary internally to fix dimensionality issues if there is only
one parameter in the model.
\item \code{p_transformed_Laplace(pTransform)}. Laplace approximation at
transformed (unconstrained) parameter value \code{pTransform}. To
make maximizing the Laplace likelihood unconstrained, an automated
transformation via \code{\link{parameterTransform}} is performed on
any parameters with constraints indicated by their priors (even
though the prior probabilities are not used).
\item \code{gr_otherLogLik_internal(p)}. Gradient (vector of
derivatives with respect to each parameter) of \code{otherLogLik(p)}.
This is obtained using automatic differentiation (AD) with single-taping.
First call will always be slower than later calls.
}
}
\section{\code{control} list}{
\code{buildLaplace} accepts the following control list elements:
\itemize{
\item \code{split}. If TRUE (default), \code{randomEffectsNodes} will be
split into conditionally independent sets if possible. This
facilitates more efficient Laplace approximation because each
conditionally independent set can be marginalized independently. If
FALSE, \code{randomEffectsNodes} will be handled as one multivariate
block, with one multivariate Laplace approximation. If \code{split}
is a numeric vector, \code{randomEffectsNodes} will be split by
\code{split}(\code{randomEffectsNodes}, \code{control$split}). The
last option allows arbitrary control over how
\code{randomEffectsNodes} are blocked.
\item \code{check}. If TRUE (default), a warning is issued if
\code{paramNodes}, \code{randomEffectsNodes} and/or \code{calcNodes}
        are provided but seem to have missing elements or unnecessary
elements based on some default inspection of the model. If
unnecessary warnings are emitted, simply set \code{check=FALSE}.
\item \code{innerOptimControl}. See \code{optimControl}.
\item \code{innerOptimMethod}. See \code{optimMethod}.
  \item \code{innerOptimStart}. See \code{optimStart}.
\item \code{outOptimControl}. A list of control parameters for maximizing
the Laplace log-likelihood using \code{optim}. See 'Details' of
\code{\link{optim}} for further information.
}
}
\examples{
pumpCode <- nimbleCode({
for (i in 1:N){
theta[i] ~ dgamma(alpha, beta)
lambda[i] <- theta[i] * t[i]
x[i] ~ dpois(lambda[i])
}
alpha ~ dexp(1.0)
beta ~ dgamma(0.1, 1.0)
})
pumpConsts <- list(N = 10, t = c(94.3, 15.7, 62.9, 126, 5.24, 31.4, 1.05, 1.05, 2.1, 10.5))
pumpData <- list(x = c(5, 1, 5, 14, 3, 19, 1, 1, 4, 22))
pumpInits <- list(alpha = 0.1, beta = 0.1, theta = rep(0.1, pumpConsts$N))
pump <- nimbleModel(code = pumpCode, name = "pump", constants = pumpConsts,
data = pumpData, inits = pumpInits, buildDerivs = TRUE)
# Build Laplace approximation
pumpLaplace <- buildLaplace(pump)
\dontrun{
# Compile the model
Cpump <- compileNimble(pump)
CpumpLaplace <- compileNimble(pumpLaplace, project = pump)
# Calculate MLEs of parameters
MLEres <- CpumpLaplace$findMLE()
# Calculate estimates and standard errors for parameters and random effects on original scale
allres <- CpumpLaplace$summary(MLEres, randomEffectsStdError = TRUE)
}
}
\references{
Kass, R. and Steffey, D. (1989). Approximate Bayesian inference in
conditionally independent hierarchical models (parametric empirical Bayes
models). \emph{Journal of the American Statistical Association}, 84(407),
717–726.
Skaug, H. and Fournier, D. (2006). Automatic approximation of the marginal
likelihood in non-Gaussian hierarchical models. \emph{Computational
Statistics & Data Analysis}, 56, 699–709.
}
\author{
Wei Zhang, Perry de Valpine
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beginr.R
\name{readdir}
\alias{readdir}
\title{Read multiple tables into a list.}
\usage{
readdir(mydir = getwd(), sep = c(","), output = c("list", "data.frame"), header = TRUE,
skip = 0)
}
\arguments{
\item{mydir}{the folder path}
\item{sep}{the field separator character.}
\item{output}{the type of the output. 'list' or 'data.frame'.}
\item{header}{logical. Indicating whether the file contains the names of the variables as its first line.}
\item{skip}{the number of lines of the data file to skip before beginning to read data.}
}
\value{
a list or a data frame
}
\description{
Read multiple tables into a list.
}
|
/man/readdir.Rd
|
no_license
|
cran/beginr
|
R
| false
| true
| 732
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beginr.R
\name{readdir}
\alias{readdir}
\title{Read multiple tables into a list.}
\usage{
readdir(mydir = getwd(), sep = c(","), output = c("list", "data.frame"), header = TRUE,
skip = 0)
}
\arguments{
\item{mydir}{the folder path}
\item{sep}{the field separator character.}
\item{output}{the type of the output. 'list' or 'data.frame'.}
\item{header}{logical. Indicating whether the file contains the names of the variables as its first line.}
\item{skip}{the number of lines of the data file to skip before beginning to read data.}
}
\value{
a list or a data frame
}
\description{
Read multiple tables into a list.
}
|
# WNS Analytics hackathon: load and pre-process the employee-promotion
# training data (exploratory checks + factor conversions).
library(BBmisc)
library(caret)
library(dummies)

# Import train data into a dataframe
# NOTE(review): setwd() in a script is fragile; prefer project-relative paths.
setwd("~/Downloads/AV WNS HACKATHON")
# Fixed: use TRUE/FALSE literals instead of the reassignable T/F aliases.
orig_train_data <- read.csv('train_LZdllcl.csv', header = TRUE,
                            strip.white = TRUE, stringsAsFactors = FALSE)

# Check first few rows of the dataframe
head(orig_train_data)
# Check summary
summary(orig_train_data)

# Check for NA values across different columns of the dataframe
colSums(is.na(orig_train_data))

# nrow(orig_train_data) - nrow(orig_train_data[orig_train_data$length_of_service>1,])
# nrow(orig_train_data) - nrow(orig_train_data[orig_train_data$length_of_service==1 & orig_train_data$is_promoted==1,])
# nrow(orig_train_data[orig_train_data$length_of_service==1 & orig_train_data$is_promoted==1,])
# sum(is.na(orig_train_data$length_of_service))

# Check pattern of data - basic analysis (distributions of numeric columns)
hist(orig_train_data$age)
hist(orig_train_data$length_of_service)
hist(orig_train_data$no_of_trainings)
hist(orig_train_data$previous_year_rating)
hist(orig_train_data$awards_won.)
hist(orig_train_data$avg_training_score)
# Fixed: region is a character column (read with stringsAsFactors = FALSE and
# converted to a factor below), so hist() would error; show category counts instead.
barplot(table(orig_train_data$region))

# Inspecting few columns more closely
sum(is.na(orig_train_data$education))
# Identify rows with blank values - education
nrow(orig_train_data[orig_train_data$education == '',])
# Identify rows with blank values - department
nrow(orig_train_data[orig_train_data$department == '',])

# Save original data frame into another df for further processing of the data
processed_data <- orig_train_data

# From previous analysis it is clear that gender column has just two values, convert it into factor
processed_data$gender <- as.factor(processed_data$gender)
summary(processed_data$gender)

# From previous analysis it is clear that department column does not have any blanks/nas, convert it into factor
processed_data$department <- as.factor(processed_data$department)
summary(processed_data$department)

# Replace blank values in education column with the literal string 'NA' so
# blanks become their own factor level (intentionally NOT NA_character_).
processed_data[which(processed_data$education == ''), 'education'] <- 'NA'
# Convert education column to a factor (trim stray whitespace first)
processed_data$education <- as.factor(as.character(trimws(processed_data$education, which = 'both')))
summary(processed_data$education)

# Convert region column to a factor (trim stray whitespace first)
processed_data$region <- as.factor(as.character(trimws(processed_data$region, which = 'both')))
summary(processed_data$region)
# Convert region column to a factor
processed_data$recruitment_channel <- as.factor(as.character(trimws(processed_data$recruitment_channel,which = 'both')))
summary(processed_data$recruitment_channel)
# Check structure of the processed data frame to see whether all the factor variables show-up correctly
str(processed_data)
# Convert KPI_met column to a factor column as it has only 0 and 1 values
processed_data$KPIs_met..80. <- as.factor(processed_data$KPIs_met..80.)
levels(processed_data$KPIs_met..80.) <- c('no','yes')
summary(processed_data$KPIs_met..80.)
# Convert is_promoted column the target column as a factor
processed_data$is_promoted <- as.factor(processed_data$is_promoted)
levels(processed_data$is_promoted) <- c('no','yes')
# Double check whether there are blanks or nas in target variable
summary(processed_data$is_promoted)
# Convert awards_won column to a factor column as there are only 0 or 1 values in this column
processed_data$awards_won. <- as.factor(processed_data$awards_won.)
levels(processed_data$awards_won.) <- c('no','yes')
summary(processed_data$awards_won.)
# Double check whether there are blanks or nas
summary(processed_data$awards_won.)
# Import the test data as a dataframe, do some prilimnary analysis and treat it the same was as training data set
test_data <- read.csv('test_2umaH9m.csv', header = T,strip.white = T,stringsAsFactors = F)
summary(test_data)
#----------Begin preprocessing of test data--------#
nrow(test_data[test_data$department=='',])
nrow(test_data[test_data$education=='',])
test_data$gender <- as.factor(test_data$gender)
summary(test_data$gender)
test_data$department <- as.factor(test_data$department)
summary(test_data$department)
test_data[which(test_data$education==''),'education'] <- 'NA'
test_data$education <- as.factor(as.character(trimws(test_data$education,which = 'both')))
summary(test_data$education)
test_data$region <- as.factor(as.character(trimws(test_data$region,which = 'both')))
summary(test_data$region)
colnames(test_data)
test_data$recruitment_channel <- as.factor(as.character(trimws(test_data$recruitment_channel,which = 'both')))
summary(test_data$recruitment_channel)
str(test_data)
test_data$KPIs_met..80. <- as.factor(test_data$KPIs_met..80.)
levels(test_data$KPIs_met..80.) <- c('no','yes')
summary(test_data$KPIs_met..80.)
test_data$awards_won. <- as.factor(test_data$awards_won.)
levels(test_data$awards_won.) <- c('no','yes')
summary(test_data$awards_won.)
colSums(is.na(test_data))
#----------End preprocessing of test data--------#
#Check whether the test data contains significantly different levels in factored variables
train_set_dept <- levels(processed_data$department)
test_set_dept <- levels(test_data$department)
setdiff(train_set_dept,test_set_dept)
train_set_edu <- levels(processed_data$education)
test_set_edu <- levels(test_data$education)
setdiff(train_set_edu,test_set_edu)
train_set_reg <- levels(processed_data$region)
test_set_reg <- levels(test_data$region)
setdiff(train_set_reg,test_set_reg)
#Clean-up some space
rm(train_set_dept,train_set_edu,train_set_reg,test_set_dept,test_set_edu,test_set_reg)
#Normalize and dummify the training data
std_dumified_train_data <- processed_data
colnames(std_dumified_train_data)
#Remove the employee_id variable which is of no use
std_dumified_train_data$employee_id <- NULL
#standardize
std_dumified_train_data <- normalize(std_dumified_train_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
#dummify factor columns, excluding the target variable
std_dumified_train_data <- dummy.data.frame(data = std_dumified_train_data[,!names(std_dumified_train_data) %in% 'is_promoted'],drop = F)
#Add the target variable back to the normalized and dummified dataframe
std_dumified_train_data$is_promoted <- processed_data$is_promoted
|
/others_solutions/pre-final/093r_409_573787_cf_Final_code_submission/01_Importing_and_Preprocessing_Final_Submission.R
|
no_license
|
DataScienceWorks/AV-WNS-2018-September-JobPromotionPrediction
|
R
| false
| false
| 6,191
|
r
|
library(BBmisc)
library(caret)
library(dummies)

## Preprocessing for the AV WNS promotion-prediction hackathon:
## import the train/test CSVs, clean and factorise the categorical
## columns identically in both sets, then standardise and dummy-encode
## the training data.

#Import train data into a dataframe
#NOTE(review): setwd() ties the script to one machine; prefer relative paths.
setwd("~/Downloads/AV WNS HACKATHON")
orig_train_data <- read.csv('train_LZdllcl.csv', header = T,strip.white = T,stringsAsFactors = F)
#Check first few rows of the dataframe
head(orig_train_data)
#Check summary
summary(orig_train_data)
#Check for NA values across different columns of the dataframe
colSums(is.na(orig_train_data))
# nrow(orig_train_data) - nrow(orig_train_data[orig_train_data$length_of_service>1,])
# nrow(orig_train_data) - nrow(orig_train_data[orig_train_data$length_of_service==1 & orig_train_data$is_promoted==1,])
# nrow(orig_train_data[orig_train_data$length_of_service==1 & orig_train_data$is_promoted==1,])
# sum(is.na(orig_train_data$length_of_service))
# Check pattern of data - basic analysis
hist(orig_train_data$age)
hist(orig_train_data$length_of_service)
hist(orig_train_data$no_of_trainings)
hist(orig_train_data$previous_year_rating)
hist(orig_train_data$awards_won.)
hist(orig_train_data$avg_training_score)
# BUG FIX: region is a character column at this point (stringsAsFactors = F),
# so hist() would fail with "'x' must be numeric"; plot its frequency table.
barplot(table(orig_train_data$region))
# Inspecting few columns more closely
sum(is.na(orig_train_data$education))
# Identify columns with blank values - education
nrow(orig_train_data[orig_train_data$education=='',])
# Identify columns with blank values - department
nrow(orig_train_data[orig_train_data$department=='',])
# Save original data frame into another df for further processing of the data
processed_data <- orig_train_data
# From previous analysis it is clear that gender column has just two values, convert it into factor
processed_data$gender <- as.factor(processed_data$gender)
summary(processed_data$gender)
# From previous analysis it is clear that department column does not have any blanks/nas, convert it into factor
processed_data$department <- as.factor(processed_data$department)
summary(processed_data$department)
# Replace blank values in education column with the literal string 'NA'
# (kept as its own factor level rather than a missing value)
processed_data[which(processed_data$education==''),'education'] <- 'NA'
# Convert education column to a factor
processed_data$education <- as.factor(as.character(trimws(processed_data$education,which = 'both')))
summary(processed_data$education)
# Convert region column to a factor
processed_data$region <- as.factor(as.character(trimws(processed_data$region,which = 'both')))
summary(processed_data$region)
# Convert recruitment_channel column to a factor
processed_data$recruitment_channel <- as.factor(as.character(trimws(processed_data$recruitment_channel,which = 'both')))
summary(processed_data$recruitment_channel)
# Check structure of the processed data frame to see whether all the factor variables show-up correctly
str(processed_data)
# Convert KPI_met column to a factor column as it has only 0 and 1 values
processed_data$KPIs_met..80. <- as.factor(processed_data$KPIs_met..80.)
levels(processed_data$KPIs_met..80.) <- c('no','yes')
summary(processed_data$KPIs_met..80.)
# Convert is_promoted column (the target column) to a factor
processed_data$is_promoted <- as.factor(processed_data$is_promoted)
levels(processed_data$is_promoted) <- c('no','yes')
# Double check whether there are blanks or nas in target variable
summary(processed_data$is_promoted)
# Convert awards_won column to a factor column as there are only 0 or 1 values in this column
processed_data$awards_won. <- as.factor(processed_data$awards_won.)
levels(processed_data$awards_won.) <- c('no','yes')
summary(processed_data$awards_won.)
# Double check whether there are blanks or nas
summary(processed_data$awards_won.)
# Import the test data as a dataframe, do some preliminary analysis and treat it the same way as the training data set
test_data <- read.csv('test_2umaH9m.csv', header = T,strip.white = T,stringsAsFactors = F)
summary(test_data)
#----------Begin preprocessing of test data--------#
nrow(test_data[test_data$department=='',])
nrow(test_data[test_data$education=='',])
test_data$gender <- as.factor(test_data$gender)
summary(test_data$gender)
test_data$department <- as.factor(test_data$department)
summary(test_data$department)
test_data[which(test_data$education==''),'education'] <- 'NA'
test_data$education <- as.factor(as.character(trimws(test_data$education,which = 'both')))
summary(test_data$education)
test_data$region <- as.factor(as.character(trimws(test_data$region,which = 'both')))
summary(test_data$region)
colnames(test_data)
test_data$recruitment_channel <- as.factor(as.character(trimws(test_data$recruitment_channel,which = 'both')))
summary(test_data$recruitment_channel)
str(test_data)
test_data$KPIs_met..80. <- as.factor(test_data$KPIs_met..80.)
levels(test_data$KPIs_met..80.) <- c('no','yes')
summary(test_data$KPIs_met..80.)
test_data$awards_won. <- as.factor(test_data$awards_won.)
levels(test_data$awards_won.) <- c('no','yes')
summary(test_data$awards_won.)
colSums(is.na(test_data))
#----------End preprocessing of test data--------#
#Check whether the test data contains significantly different levels in factored variables
train_set_dept <- levels(processed_data$department)
test_set_dept <- levels(test_data$department)
setdiff(train_set_dept,test_set_dept)
train_set_edu <- levels(processed_data$education)
test_set_edu <- levels(test_data$education)
setdiff(train_set_edu,test_set_edu)
train_set_reg <- levels(processed_data$region)
test_set_reg <- levels(test_data$region)
setdiff(train_set_reg,test_set_reg)
#Clean-up some space
rm(train_set_dept,train_set_edu,train_set_reg,test_set_dept,test_set_edu,test_set_reg)
#Normalize and dummify the training data
std_dumified_train_data <- processed_data
colnames(std_dumified_train_data)
#Remove the employee_id variable which is of no use
std_dumified_train_data$employee_id <- NULL
#standardize
std_dumified_train_data <- normalize(std_dumified_train_data, method = "standardize", range = c(0, 1), margin = 1L, on.constant = "quiet")
#dummify factor columns, excluding the target variable
std_dumified_train_data <- dummy.data.frame(data = std_dumified_train_data[,!names(std_dumified_train_data) %in% 'is_promoted'],drop = F)
#Add the target variable back to the normalized and dummified dataframe
std_dumified_train_data$is_promoted <- processed_data$is_promoted
|
## Fit a cross-validated elastic-net regression for the pleura training set
## and append the fitted coefficient path summary to a log file.
library(glmnet)

# First column is the response; columns 4+ are predictors.
# FIX: the original used `head=T`, relying on partial argument matching
# for `header`; spelled out explicitly here.
mydata <- read.table("./TrainingSet/RF/pleura.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# alpha = 0.95: mostly-lasso elastic-net penalty; predictors are used as-is
# (standardize = FALSE).
# FIX: renamed the result from `glm` (which shadowed stats::glm) to en_cv.
en_cv <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.95,
                   family = "gaussian", standardize = FALSE)

# Append the fitted glmnet path summary to the model log file.
sink('./Model/EN/Classifier/pleura/pleura_094.txt', append = TRUE)
print(en_cv$glmnet.fit)
sink()
|
/Model/EN/Classifier/pleura/pleura_094.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 351
|
r
|
## Fit a cross-validated elastic-net regression for the pleura training set
## and append the fitted coefficient path summary to a log file.
library(glmnet)

# First column is the response; columns 4+ are predictors.
# FIX: the original used `head=T`, relying on partial argument matching
# for `header`; spelled out explicitly here.
mydata <- read.table("./TrainingSet/RF/pleura.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# alpha = 0.95: mostly-lasso elastic-net penalty; predictors are used as-is
# (standardize = FALSE).
# FIX: renamed the result from `glm` (which shadowed stats::glm) to en_cv.
en_cv <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.95,
                   family = "gaussian", standardize = FALSE)

# Append the fitted glmnet path summary to the model log file.
sink('./Model/EN/Classifier/pleura/pleura_094.txt', append = TRUE)
print(en_cv$glmnet.fit)
sink()
|
# Define applySimpleR only when the (optional) SingleR package is present.
# NOTE(review): `"SingleR" %in% installed.packages()` scans the whole matrix;
# requireNamespace("SingleR", quietly = TRUE) is the usual idiom -- kept as-is
# to preserve behavior.
if ("SingleR" %in% installed.packages()) {
  require(SingleR)
  #' Attach cell-type annotations using SingleR
  #'
  #' @details Uses SingleR to infer per-cell labels from a reference data set
  #'   and stores them in a new colData column of the SingleCellExperiment.
  #'   Labels whose scores are pruned (low confidence, \code{nmads = 5}) are
  #'   set to NA.
  #'
  #' @param scEx singleCellExperiment object
  #' @param hpca.se reference data set, e.g. HumanPrimaryCellAtlasData()
  #' @param colName name for the new colData column
  #'
  #' @return \code{scEx} with the annotation column added to its colData
  #'
  #' @importFrom SingleR SingleR pruneScores
  #'
  #' @export applySimpleR
  #'
  applySimpleR <- function(scEx, hpca.se, colName) {
    # SingleR needs log-normalized counts; compute them if absent.
    # NOTE(review): scater::normalize() is deprecated in recent scater
    # releases in favour of logNormCounts() -- confirm installed version.
    if (!"logcounts" %in% names(SummarizedExperiment::assays(scEx))) {
      scEx <- scater::normalize(scEx)
    }
    # Restrict both objects to their shared genes before classification.
    common <- dplyr::intersect(rownames(scEx), rownames(hpca.se))
    hpca.se <- hpca.se[common, ]
    scExC <- scEx[common, ]
    pred.hpca <- SingleR::SingleR(test = scExC, ref = hpca.se, labels = hpca.se$label.main)
    # FIX: removed the unused `to.remove <- pruneScores(pred.hpca)` call
    # (dead code; only the nmads = 5 pruning below was ever applied).
    new.pruned <- pred.hpca$labels
    new.pruned[SingleR::pruneScores(pred.hpca, nmads = 5)] <- NA
    # Write the labels back onto the full object; cells not present in
    # scExC keep the NA initialised below.
    cdata <- SingleCellExperiment::colData(scEx)
    cdata[, colName] <- NA
    cdata[colnames(scExC), colName] <- new.pruned
    SingleCellExperiment::colData(scEx) <- cdata
    return(scEx)
  }
}
|
/inst/app/applySimpleR.R
|
no_license
|
C3BI-pasteur-fr/UTechSCB-SCHNAPPs
|
R
| false
| false
| 1,187
|
r
|
# Define applySimpleR only when the (optional) SingleR package is present.
# NOTE(review): `"SingleR" %in% installed.packages()` scans the whole matrix;
# requireNamespace("SingleR", quietly = TRUE) is the usual idiom -- kept as-is
# to preserve behavior.
if ("SingleR" %in% installed.packages()) {
  require(SingleR)
  #' Attach cell-type annotations using SingleR
  #'
  #' @details Uses SingleR to infer per-cell labels from a reference data set
  #'   and stores them in a new colData column of the SingleCellExperiment.
  #'   Labels whose scores are pruned (low confidence, \code{nmads = 5}) are
  #'   set to NA.
  #'
  #' @param scEx singleCellExperiment object
  #' @param hpca.se reference data set, e.g. HumanPrimaryCellAtlasData()
  #' @param colName name for the new colData column
  #'
  #' @return \code{scEx} with the annotation column added to its colData
  #'
  #' @importFrom SingleR SingleR pruneScores
  #'
  #' @export applySimpleR
  #'
  applySimpleR <- function(scEx, hpca.se, colName) {
    # SingleR needs log-normalized counts; compute them if absent.
    # NOTE(review): scater::normalize() is deprecated in recent scater
    # releases in favour of logNormCounts() -- confirm installed version.
    if (!"logcounts" %in% names(SummarizedExperiment::assays(scEx))) {
      scEx <- scater::normalize(scEx)
    }
    # Restrict both objects to their shared genes before classification.
    common <- dplyr::intersect(rownames(scEx), rownames(hpca.se))
    hpca.se <- hpca.se[common, ]
    scExC <- scEx[common, ]
    pred.hpca <- SingleR::SingleR(test = scExC, ref = hpca.se, labels = hpca.se$label.main)
    # FIX: removed the unused `to.remove <- pruneScores(pred.hpca)` call
    # (dead code; only the nmads = 5 pruning below was ever applied).
    new.pruned <- pred.hpca$labels
    new.pruned[SingleR::pruneScores(pred.hpca, nmads = 5)] <- NA
    # Write the labels back onto the full object; cells not present in
    # scExC keep the NA initialised below.
    cdata <- SingleCellExperiment::colData(scEx)
    cdata[, colName] <- NA
    cdata[colnames(scExC), colName] <- new.pruned
    SingleCellExperiment::colData(scEx) <- cdata
    return(scEx)
  }
}
|
## Install the quantstrat backtesting stack.
## blotter/quantstrat are not on CRAN, so they come from GitHub via devtools.
# FIX: library() instead of require() -- require() returns FALSE silently
# when devtools is missing, and install_github() would then fail confusingly.
library(devtools)
install.packages("FinancialInstrument")
install.packages("PerformanceAnalytics")
install.packages("foreach")
install_github("braverock/blotter") # dependency of quantstrat
install_github("braverock/quantstrat")
|
/rprojectSPL/BackTest/Quantstrat/installQuantstrat00.R
|
permissive
|
UTexas80/gitSPL
|
R
| false
| false
| 214
|
r
|
## Install the quantstrat backtesting stack.
## blotter/quantstrat are not on CRAN, so they come from GitHub via devtools.
# FIX: library() instead of require() -- require() returns FALSE silently
# when devtools is missing, and install_github() would then fail confusingly.
library(devtools)
install.packages("FinancialInstrument")
install.packages("PerformanceAnalytics")
install.packages("foreach")
install_github("braverock/blotter") # dependency of quantstrat
install_github("braverock/quantstrat")
|
### Missing States handling with the help of location
## Fills in missing/inconsistent State values on the global `Dataset` by
## (1) normalising known misspellings, then (2) Jaro-Winkler fuzzy-matching
## the free-text Location and City fields against a reference list of
## locations per state. Finishes with per-state/per-year plots and
## customer/notification-level subsets.
# Normalise common alternate spellings to canonical state names.
Dataset$StateCorrected = Dataset$State
Dataset[which(Dataset$StateCorrected == 'Chattisgarh'),'StateCorrected'] = 'Chhattisgarh'
Dataset[which(Dataset$StateCorrected == 'Orissa'),'StateCorrected'] = 'Odisha'
Dataset[which(Dataset$StateCorrected == 'W Bengal'),'StateCorrected'] = 'West Bengal'
Dataset[which(Dataset$StateCorrected == 'TN'),'StateCorrected'] = 'Tamil Nadu'
# Lower-case and trim every text column used for matching.
Dataset$StateCorrected = tolower(trimws(Dataset$StateCorrected))
Dataset$`City-final` = tolower(trimws(Dataset$`City-final`))
Dataset$City = tolower(trimws(Dataset$City))
Dataset$Location = tolower(trimws(Dataset$Location))
# NOTE(review): States_Cities is loaded but never used below
# (Locations_Assigned from data/Cities_States.xlsx is used instead), and the
# path here lacks the 'data/' prefix -- confirm which file is intended.
States_Cities = read_excel(path = 'Cities_States.xlsx')
States_Cities$`Name of City` = trimws(tolower(States_Cities$`Name of City`),which = 'both')
States_Cities$State = trimws(tolower(States_Cities$State),which = 'both')
Dataset$StateCorrected = trimws(tolower(Dataset$StateCorrected),which = 'both')
Dataset$Location = trimws(tolower(Dataset$Location),which = 'both')
# Mapping based on location
require(readxl)
Locations_Assigned = read_excel(path = 'data/Cities_States.xlsx')
Locations_Missed = read_excel(path = 'data/Workingdata.xlsx',sheet = 'State_City_Missing')
Locations_Assigned$Location = tolower(trimws(Locations_Assigned$Location))
Locations_Missed$Location = tolower(trimws(Locations_Missed$Location))
require(stringdist)
# Pairwise Jaro-Winkler distances: rows = reference (assigned) locations,
# columns = locations whose state is missing.
DistanceNameMatrix<-matrix(NA, ncol = length(Locations_Missed$Location),
                           nrow = length(Locations_Assigned$Location))
for(i in 1:length(Locations_Missed$Location)) {
  for(j in 1:length(Locations_Assigned$Location)) {
    DistanceNameMatrix[j,i]<-stringdist(tolower(Locations_Missed[i,]$Location),
                                        tolower(Locations_Assigned[j,]$Location),
                                        method ='jw')
  }
}
# For each reference location (row), find the closest missed location and
# record the candidate pair; pairs are accumulated via rbind (grows in-loop).
Match_Location_DF<-NULL
MinName<-apply(DistanceNameMatrix, 1, base::min)
for(i in 1:nrow(DistanceNameMatrix)){
  S2<-match(MinName[i],DistanceNameMatrix[i,])
  S1<-i
  Match_Location_DF<-rbind(data.frame(Missed_Id=S2,Assigned_Id=S1,
                                      Missed=Locations_Missed[S2,]$Location,
                                      Assigned=Locations_Assigned[S1,]$Location,
                                      adist=MinName[i],
                                      method='jm'),
                           Match_Location_DF)
}
# Keep only near-matches (JW distance <= 0.05) and de-duplicate pairs.
Match_Location_DF = Match_Location_DF[Match_Location_DF$adist<=0.05,]
Match_Location_DF = Match_Location_DF[order(Match_Location_DF$Assigned_Id),]
Match_Location_DF = Match_Location_DF[!duplicated(Match_Location_DF[,3:4]),]
# Apply each accepted match: assign the reference state to every Dataset row
# whose Location equals the matched (missed) location string.
for(i in 1:nrow(Match_Location_DF)){
  temp_Assigned_Id = Match_Location_DF$Assigned_Id[i]
  temp_state = Locations_Assigned$State[temp_Assigned_Id]
  temp_location = as.character(Match_Location_DF$Missed[i])
  Dataset = within(Dataset,StateCorrected[Location == temp_location ] <- temp_state)
} ### 4546 state are mapped !!!
# Mapping based on City -- same procedure, but matching the City field and
# accepting only exact (distance 0) matches.
excel_sheets(path = 'data/Workingdata.xlsx')
Cities_Missed = read_excel(path = 'data/Workingdata.xlsx',sheet = 'State_Missing_CityDetails')
Locations_Assigned$Location = tolower(trimws(Locations_Assigned$Location))
Cities_Missed$City = tolower(trimws(Cities_Missed$City))
DistanceNameMatrix<-matrix(NA, ncol = length(Cities_Missed$City),
                           nrow = length(Locations_Assigned$Location))
for(i in 1:length(Cities_Missed$City)) {
  for(j in 1:length(Locations_Assigned$Location)) {
    DistanceNameMatrix[j,i]<-stringdist(tolower(Cities_Missed[i,]$City),
                                        tolower(Locations_Assigned[j,]$Location),
                                        method ='jw')
  }
}
Match_Location_DF<-NULL
MinName<-apply(DistanceNameMatrix, 1, base::min)
for(i in 1:nrow(DistanceNameMatrix)){
  S2<-match(MinName[i],DistanceNameMatrix[i,])
  S1<-i
  Match_Location_DF<-rbind(data.frame(Missed_Id=S2,Assigned_Id=S1,
                                      Missed=Cities_Missed[S2,]$City,
                                      Assigned=Locations_Assigned[S1,]$Location,
                                      adist=MinName[i],
                                      method='jm'),
                           Match_Location_DF)
}
Match_Location_DF = Match_Location_DF[Match_Location_DF$adist==0,]
Match_Location_DF = Match_Location_DF[order(Match_Location_DF$Assigned_Id),]
Match_Location_DF = Match_Location_DF[!duplicated(Match_Location_DF[,3:4]),]
# NOTE(review): this loop still filters Dataset on Location (not City) even
# though the matches were made on the City field -- confirm this is intended.
for(i in 1:nrow(Match_Location_DF)){
  temp_Assigned_Id = Match_Location_DF$Assigned_Id[i]
  temp_state = Locations_Assigned$State[temp_Assigned_Id]
  temp_location = as.character(Match_Location_DF$Missed[i])
  Dataset = within(Dataset,StateCorrected[Location == temp_location ] <- temp_state)
} ### 3257 state are mapped !!!
# Corrected State wise Analysis: join an ordering/grouping column per state.
OrderState = read_excel('StateOrder.xlsx')
Dataset$StateCorrected = tolower(trimws(Dataset$StateCorrected))
Dataset = Dataset %>%
  left_join(OrderState,by='StateCorrected')
# Faceted bar chart of one question's responses; dataset must have exactly
# two columns (facet variable, response). `barColour` is a global.
GGBarPlot_Facet <- function(dataset,title){
  colnames(dataset) = c('X','Y')
  dataset %>%
    group_by(X)%>%
    ggplot(aes(Y))+
    facet_wrap(~X, scales = "free_x")+
    geom_bar(fill=barColour)+
    ggtitle(paste('Question',title,'Response across states'))+xlab('')+ylab('')
}
# Columns 30:39 hold the question responses; column 40 is the facet variable.
for(i in 30:39){
  print(GGBarPlot_Facet(Dataset[,c(40,i)],i-29))
}
Dataset %>%
  ggplot(aes(StateCorrected))+
  geom_bar()+
  ggtitle('State wise records count')+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Year wise analysis
# Line plot of record counts per year (column Y) split by rating value (D).
GGplotYoYLinePlot <- function(dataset,i){
  colnames(dataset) = c('Y','D')
  dataset %>%
    group_by(Y,D)%>%
    summarise(records = length(D))%>%
    ggplot(aes(Y,records,colour = D))+
    guides(colour=guide_legend(title='Rating Index'))+
    geom_line()+ggtitle(paste('Question',i,'Rating over the years'))+
    xlab('Years')+ylab('No of Records')
}
for(i in 30:39){
  print(GGplotYoYLinePlot(Dataset[,c(4,i)],i-29))
}
qplot(Year,..count..,data = Dataset,
      geom = 'bar',main='Year wise records count')
# Rows with a non-blank customer name (column 14), ordered by customer/date;
# then keep customers that appear more than once plus the adjacent record.
CustomerBasedDataset = Dataset[,c(4,5,8,9,10,12,15,13,11,14,28,29,40)][!apply(Dataset[,14], 1, function(x) any(x=="" | is.na(x))),]
CustomerBasedDataset = CustomerBasedDataset[order(CustomerBasedDataset$`Customer name`,CustomerBasedDataset$`Month&Year`),]
repeatedCustRecIds = which(duplicated(CustomerBasedDataset$`Customer name`))
repeatedCustRecIds = sort(unique(c(repeatedCustRecIds,repeatedCustRecIds-1)))
CustomerBasedDataset_Repeated = CustomerBasedDataset[repeatedCustRecIds,]
# Same treatment keyed on notification number (column 9).
NotifNoBasedDataset = Dataset[,c(4,5,8,9,10,12,13,15,11,14,28,29,40)][!apply(Dataset[,9], 1, function(x) any(x=="" | is.na(x))),]
NotifNoBasedDataset = NotifNoBasedDataset[order(NotifNoBasedDataset$`Notification no`,NotifNoBasedDataset$`Month&Year`),]
repeatedNotifNoIds = which(duplicated(NotifNoBasedDataset$`Notification no`))
repeatedNotifNoIds = sort(unique(c(repeatedNotifNoIds,repeatedNotifNoIds-1)))
NotifNoBasedDataset_Repeated = NotifNoBasedDataset[repeatedNotifNoIds,]
# Per-state splits, overall and by product line (FIE / AE).
StateWiseDF_List = split(Dataset,f = Dataset$StateCorrected)
Dataset_FIE = subset(Dataset,Dataset$Product=='FIE')
Dataset_AE = subset(Dataset,Dataset$Product=='AE')
StateWiseDF_List_FIE = split(Dataset_FIE,f = Dataset_FIE$StateCorrected)
StateWiseDF_List_AE = split(Dataset_AE,f = Dataset_AE$StateCorrected)
|
/Customer_Satisfaction_Capstone/Cluster_Analysis.R
|
no_license
|
raviyelugula/Projects
|
R
| false
| false
| 7,427
|
r
|
### Missing States handling with the help of location
## Fills in missing/inconsistent State values on the global `Dataset` by
## (1) normalising known misspellings, then (2) Jaro-Winkler fuzzy-matching
## the free-text Location and City fields against a reference list of
## locations per state. Finishes with per-state/per-year plots and
## customer/notification-level subsets.
# Normalise common alternate spellings to canonical state names.
Dataset$StateCorrected = Dataset$State
Dataset[which(Dataset$StateCorrected == 'Chattisgarh'),'StateCorrected'] = 'Chhattisgarh'
Dataset[which(Dataset$StateCorrected == 'Orissa'),'StateCorrected'] = 'Odisha'
Dataset[which(Dataset$StateCorrected == 'W Bengal'),'StateCorrected'] = 'West Bengal'
Dataset[which(Dataset$StateCorrected == 'TN'),'StateCorrected'] = 'Tamil Nadu'
# Lower-case and trim every text column used for matching.
Dataset$StateCorrected = tolower(trimws(Dataset$StateCorrected))
Dataset$`City-final` = tolower(trimws(Dataset$`City-final`))
Dataset$City = tolower(trimws(Dataset$City))
Dataset$Location = tolower(trimws(Dataset$Location))
# NOTE(review): States_Cities is loaded but never used below
# (Locations_Assigned from data/Cities_States.xlsx is used instead), and the
# path here lacks the 'data/' prefix -- confirm which file is intended.
States_Cities = read_excel(path = 'Cities_States.xlsx')
States_Cities$`Name of City` = trimws(tolower(States_Cities$`Name of City`),which = 'both')
States_Cities$State = trimws(tolower(States_Cities$State),which = 'both')
Dataset$StateCorrected = trimws(tolower(Dataset$StateCorrected),which = 'both')
Dataset$Location = trimws(tolower(Dataset$Location),which = 'both')
# Mapping based on location
require(readxl)
Locations_Assigned = read_excel(path = 'data/Cities_States.xlsx')
Locations_Missed = read_excel(path = 'data/Workingdata.xlsx',sheet = 'State_City_Missing')
Locations_Assigned$Location = tolower(trimws(Locations_Assigned$Location))
Locations_Missed$Location = tolower(trimws(Locations_Missed$Location))
require(stringdist)
# Pairwise Jaro-Winkler distances: rows = reference (assigned) locations,
# columns = locations whose state is missing.
DistanceNameMatrix<-matrix(NA, ncol = length(Locations_Missed$Location),
                           nrow = length(Locations_Assigned$Location))
for(i in 1:length(Locations_Missed$Location)) {
  for(j in 1:length(Locations_Assigned$Location)) {
    DistanceNameMatrix[j,i]<-stringdist(tolower(Locations_Missed[i,]$Location),
                                        tolower(Locations_Assigned[j,]$Location),
                                        method ='jw')
  }
}
# For each reference location (row), find the closest missed location and
# record the candidate pair; pairs are accumulated via rbind (grows in-loop).
Match_Location_DF<-NULL
MinName<-apply(DistanceNameMatrix, 1, base::min)
for(i in 1:nrow(DistanceNameMatrix)){
  S2<-match(MinName[i],DistanceNameMatrix[i,])
  S1<-i
  Match_Location_DF<-rbind(data.frame(Missed_Id=S2,Assigned_Id=S1,
                                      Missed=Locations_Missed[S2,]$Location,
                                      Assigned=Locations_Assigned[S1,]$Location,
                                      adist=MinName[i],
                                      method='jm'),
                           Match_Location_DF)
}
# Keep only near-matches (JW distance <= 0.05) and de-duplicate pairs.
Match_Location_DF = Match_Location_DF[Match_Location_DF$adist<=0.05,]
Match_Location_DF = Match_Location_DF[order(Match_Location_DF$Assigned_Id),]
Match_Location_DF = Match_Location_DF[!duplicated(Match_Location_DF[,3:4]),]
# Apply each accepted match: assign the reference state to every Dataset row
# whose Location equals the matched (missed) location string.
for(i in 1:nrow(Match_Location_DF)){
  temp_Assigned_Id = Match_Location_DF$Assigned_Id[i]
  temp_state = Locations_Assigned$State[temp_Assigned_Id]
  temp_location = as.character(Match_Location_DF$Missed[i])
  Dataset = within(Dataset,StateCorrected[Location == temp_location ] <- temp_state)
} ### 4546 state are mapped !!!
# Mapping based on City -- same procedure, but matching the City field and
# accepting only exact (distance 0) matches.
excel_sheets(path = 'data/Workingdata.xlsx')
Cities_Missed = read_excel(path = 'data/Workingdata.xlsx',sheet = 'State_Missing_CityDetails')
Locations_Assigned$Location = tolower(trimws(Locations_Assigned$Location))
Cities_Missed$City = tolower(trimws(Cities_Missed$City))
DistanceNameMatrix<-matrix(NA, ncol = length(Cities_Missed$City),
                           nrow = length(Locations_Assigned$Location))
for(i in 1:length(Cities_Missed$City)) {
  for(j in 1:length(Locations_Assigned$Location)) {
    DistanceNameMatrix[j,i]<-stringdist(tolower(Cities_Missed[i,]$City),
                                        tolower(Locations_Assigned[j,]$Location),
                                        method ='jw')
  }
}
Match_Location_DF<-NULL
MinName<-apply(DistanceNameMatrix, 1, base::min)
for(i in 1:nrow(DistanceNameMatrix)){
  S2<-match(MinName[i],DistanceNameMatrix[i,])
  S1<-i
  Match_Location_DF<-rbind(data.frame(Missed_Id=S2,Assigned_Id=S1,
                                      Missed=Cities_Missed[S2,]$City,
                                      Assigned=Locations_Assigned[S1,]$Location,
                                      adist=MinName[i],
                                      method='jm'),
                           Match_Location_DF)
}
Match_Location_DF = Match_Location_DF[Match_Location_DF$adist==0,]
Match_Location_DF = Match_Location_DF[order(Match_Location_DF$Assigned_Id),]
Match_Location_DF = Match_Location_DF[!duplicated(Match_Location_DF[,3:4]),]
# NOTE(review): this loop still filters Dataset on Location (not City) even
# though the matches were made on the City field -- confirm this is intended.
for(i in 1:nrow(Match_Location_DF)){
  temp_Assigned_Id = Match_Location_DF$Assigned_Id[i]
  temp_state = Locations_Assigned$State[temp_Assigned_Id]
  temp_location = as.character(Match_Location_DF$Missed[i])
  Dataset = within(Dataset,StateCorrected[Location == temp_location ] <- temp_state)
} ### 3257 state are mapped !!!
# Corrected State wise Analysis: join an ordering/grouping column per state.
OrderState = read_excel('StateOrder.xlsx')
Dataset$StateCorrected = tolower(trimws(Dataset$StateCorrected))
Dataset = Dataset %>%
  left_join(OrderState,by='StateCorrected')
# Faceted bar chart of one question's responses; dataset must have exactly
# two columns (facet variable, response). `barColour` is a global.
GGBarPlot_Facet <- function(dataset,title){
  colnames(dataset) = c('X','Y')
  dataset %>%
    group_by(X)%>%
    ggplot(aes(Y))+
    facet_wrap(~X, scales = "free_x")+
    geom_bar(fill=barColour)+
    ggtitle(paste('Question',title,'Response across states'))+xlab('')+ylab('')
}
# Columns 30:39 hold the question responses; column 40 is the facet variable.
for(i in 30:39){
  print(GGBarPlot_Facet(Dataset[,c(40,i)],i-29))
}
Dataset %>%
  ggplot(aes(StateCorrected))+
  geom_bar()+
  ggtitle('State wise records count')+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Year wise analysis
# Line plot of record counts per year (column Y) split by rating value (D).
GGplotYoYLinePlot <- function(dataset,i){
  colnames(dataset) = c('Y','D')
  dataset %>%
    group_by(Y,D)%>%
    summarise(records = length(D))%>%
    ggplot(aes(Y,records,colour = D))+
    guides(colour=guide_legend(title='Rating Index'))+
    geom_line()+ggtitle(paste('Question',i,'Rating over the years'))+
    xlab('Years')+ylab('No of Records')
}
for(i in 30:39){
  print(GGplotYoYLinePlot(Dataset[,c(4,i)],i-29))
}
qplot(Year,..count..,data = Dataset,
      geom = 'bar',main='Year wise records count')
# Rows with a non-blank customer name (column 14), ordered by customer/date;
# then keep customers that appear more than once plus the adjacent record.
CustomerBasedDataset = Dataset[,c(4,5,8,9,10,12,15,13,11,14,28,29,40)][!apply(Dataset[,14], 1, function(x) any(x=="" | is.na(x))),]
CustomerBasedDataset = CustomerBasedDataset[order(CustomerBasedDataset$`Customer name`,CustomerBasedDataset$`Month&Year`),]
repeatedCustRecIds = which(duplicated(CustomerBasedDataset$`Customer name`))
repeatedCustRecIds = sort(unique(c(repeatedCustRecIds,repeatedCustRecIds-1)))
CustomerBasedDataset_Repeated = CustomerBasedDataset[repeatedCustRecIds,]
# Same treatment keyed on notification number (column 9).
NotifNoBasedDataset = Dataset[,c(4,5,8,9,10,12,13,15,11,14,28,29,40)][!apply(Dataset[,9], 1, function(x) any(x=="" | is.na(x))),]
NotifNoBasedDataset = NotifNoBasedDataset[order(NotifNoBasedDataset$`Notification no`,NotifNoBasedDataset$`Month&Year`),]
repeatedNotifNoIds = which(duplicated(NotifNoBasedDataset$`Notification no`))
repeatedNotifNoIds = sort(unique(c(repeatedNotifNoIds,repeatedNotifNoIds-1)))
NotifNoBasedDataset_Repeated = NotifNoBasedDataset[repeatedNotifNoIds,]
# Per-state splits, overall and by product line (FIE / AE).
StateWiseDF_List = split(Dataset,f = Dataset$StateCorrected)
Dataset_FIE = subset(Dataset,Dataset$Product=='FIE')
Dataset_AE = subset(Dataset,Dataset$Product=='AE')
StateWiseDF_List_FIE = split(Dataset_FIE,f = Dataset_FIE$StateCorrected)
StateWiseDF_List_AE = split(Dataset_AE,f = Dataset_AE$StateCorrected)
|
# function that computes EYdopt - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYdopt
#' @aliases EYdopt
#' @title Estimation of E[Ydopt]
#' @description Given a W, A, Y dataset, this function will compute the estimated ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true treatment under the optimal rule and other metrics of evaluating the estimated optimal rule's performance. Then, it will estimate E[Ydopt] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip"), vote-based SuperLearner ("vote"). Note that if metalearner is "vote" then cannot put in kappa.
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ydopt] risk use "CV IPCWDR" (for -E[Ydopt] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ydopt] estimates using TMLE); (3) For the upper bound of the CI of -E[Ydopt] use "CV TMLE CI"
#' @param dopt.SL.library SuperLearner library for estimating dopt directly. Default is \code{NULL}. Could be "DonV", "Qlearn", "OWL", "EARL", "optclass", "RWL", "treatall", "treatnone". Could also be "all" for all algorithms.
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast A dim = (n, num contrasts) matrix or dataframe (with columns preferably named) to contrast Psi = E[Ydopt]-E[Ycontrast] for CV-TMLE. For example, contrast = data.frame("EY0" = rep(0,n)) will contrast Psi = E[Ydopt]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ydopt] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYdopt_estimates}{Point estimates and confidence intervals for E[Ydopt], using the unadjusted mean outcome for the people who received the optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} or \code{SL.vote} documentation.}
#' }
#'
#' @references
#' van der Laan, Mark J., and Alexander R. Luedtke. "Targeted learning of the mean outcome under an optimal dynamic treatment rule." \emph{Journal of causal inference} 3.1 (2015): 61-95.
#'
#' Luedtke, Alexander R., and Mark J. van der Laan. "Super-learning of an optimal dynamic treatment rule." \emph{The international journal of biostatistics} 12.1 (2016): 305-332.
#'
#' Luedtke, Alexander R., and Mark J. van der Laan. "Optimal individualized treatments in resource-limited settings." \emph{The international journal of biostatistics} 12.1 (2016): 283-303.
#'
#' Coyle, J.R. (2017). Jeremy Coyle, “Computational Considerations for Targeted Learning” PhD diss., University of California, Berkeley 2017 \url{https://escholarship.org/uc/item/9kh0b9vm}.
#'
#' @export
#'
#' @examples
#' ## Example
#' library(SuperLearner)
#' library(hitandrun)
#' ObsData = subset(DGP_bin_simple(1000), select = -c(A_star, Y_star))
#' W = subset(ObsData, select = -c(A,Y))
#' V = W
#' A = ObsData$A
#' Y = ObsData$Y
#'
#' # E[Ydopt] using blip-based estimate of ODTR with risk function CV-TMLE
#' EYdopt(W = W, A = A, Y = Y, V = W, blip.SL.library = "SL.blip.HTEepi", g.SL.library = "SL.mean", QAW.SL.library = "SL.QAW.HTEepi", risk.type = "CV TMLE", metalearner = 'blip')
EYdopt = function(W, V, A, Y, g.SL.library = "SL.mean", QAW.SL.library, blip.SL.library, dopt.SL.library = NULL,
                  metalearner = "blip", risk.type = "CV TMLE",
                  grid.size = 100, VFolds = 10, QAW.fun = NULL,
                  family = NULL, contrast = NULL){
  # Estimate E[Ydopt]: learn the ODTR on the full sample (non-CV estimators),
  # then repeat the whole learning step within VFolds cross-validation folds
  # for the CV-TMLE estimator. See the roxygen header above for the argument
  # contract; QAW.fun (true outcome regression) is only used in simulations.
  n = length(Y)
  # Default family: binomial when Y is bounded in [0, 1], gaussian otherwise.
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  # Observed range of Y, forwarded to odtr/tmle.d.fun -- presumably used to
  # scale Y into [0, 1] for the TMLE targeting step (confirm in tmle.d.fun).
  ab = range(Y)
  #### All things non-CV ####
  # Fit the ODTR SuperLearner on the full sample; rule.output = "d" returns a
  # deterministic rule (dopt) along with the fitted outcome regression
  # (QAW.reg) and treatment-mechanism regression (g.reg).
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = dopt.SL.library, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = NULL, family = family, rule.output = "d")
  QAW.reg = SL.odtr$QAW.reg
  g.reg = SL.odtr$g.reg
  dopt = SL.odtr$dopt
  #EnYdn, non-CVTMLE
  # Non-CV estimators (unadjusted, g-comp, IPTW, IPTW-DR, TMLE) of the mean
  # outcome under the *estimated* rule, using full-sample nuisance fits.
  EnYdn.nonCVTMLE = estimatorsEYd_nonCVTMLE(W = W, A = A, Y = Y, d = dopt, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  # Simulation-only branch: when the true E[Y|A,W] is supplied, also evaluate
  # the *true* optimal rule and the true value of the estimated rule.
  if (!is.null(QAW.fun)) {
    # True optimal rule d0, derived from the true blip E[Y|1,W] - E[Y|0,W].
    dopt0 = dopt.fun(blip = QAW.fun(A = 1, W = W) - QAW.fun(A = 0, W = W), kappa = NULL)
    #EnYd0, non-CVTMLE
    EnYd0.nonCVTMLE = estimatorsEYd_nonCVTMLE(W = W, A = A, Y = Y, d = dopt0, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
    #E0Ydn, non-CVTMLE
    # True mean outcome under the estimated rule (no estimation error in Q).
    E0Ydn.nonCVTMLE = mean(QAW.fun(A = dopt, W = W))
    if (!is.null(contrast)) {
      # For each contrast rule (column of `contrast`), report the true
      # difference E[Ydopt] - E[Ycontrast].
      contrastE0Ydn_fun = function(contrast_i) {
        contrast_i = contrast_i
        E0Ydn.nonCVTMLE_i = E0Ydn.nonCVTMLE - mean(QAW.fun(A = contrast_i, W = W))
        return(E0Ydn.nonCVTMLE_i)
      }
      contrastE0Ydn_df = apply(contrast, 2, contrastE0Ydn_fun)
      E0Ydn.nonCVTMLE = c(EYd = E0Ydn.nonCVTMLE, contrastE0Ydn_df)
    }
  }
  #### All things CV ####
  # Random fold labels; sampling with replacement means fold sizes are not
  # exactly balanced. NOTE(review): style -- TRUE is safer than T here.
  folds = sample(1:VFolds, size = n, replace = T)
  # Per-fold worker: re-learn the rule and nuisances on the training folds,
  # then run the TMLE targeting step on the held-out fold i.
  CV.TMLE_fun = function(i){
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = dopt.SL.library,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = NULL, family = family, ab = ab, rule.output = "d")
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    # Rule learned on training data, evaluated at the held-out covariates.
    dopt.test = SL.odtr.train$dopt
    # Predicted treatment probabilities g(1|W) and g(A|W) on the test fold.
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)
    # Outcome regression evaluated at the rule-assigned treatment.
    Qdopt.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = dopt.test), type = "response")$pred
    # TMLE targeting on the held-out fold; returns psi and influence curve.
    tmle_objects.EnYdn.test = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = dopt.test, Qd = Qdopt.test, gAW = gAW.test, ab = ab)
    Psi_EnYdn.test = tmle_objects.EnYdn.test$psi
    varIC_EnYdn.test = var(tmle_objects.EnYdn.test$IC)
    toreturn = list(Psi_EnYdn.test = c(EYd = Psi_EnYdn.test), varIC_EnYdn.test = c(EYd = varIC_EnYdn.test))
    # Simulation-only: same targeting for the true rule d0 (captured from the
    # enclosing environment; only defined when QAW.fun is non-NULL).
    if (!is.null(QAW.fun)) {
      E0Ydn.test = mean(QAW.fun(A = dopt.test, W = W[folds == i,,drop=F]))
      Qdopt0.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = dopt0[folds == i]), type = "response")$pred
      tmle_objects.EnYd0.test = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = dopt0[folds == i], Qd = Qdopt0.test, gAW = gAW.test, ab = ab)
      Psi_EnYd0.test = tmle_objects.EnYd0.test$psi
      varIC_EnYd0.test = var(tmle_objects.EnYd0.test$IC)
      toreturn = list(Psi_EnYdn.test = c(EYd = Psi_EnYdn.test), varIC_EnYdn.test = c(EYd = varIC_EnYdn.test),
                      Psi_EnYd0.test = c(EYd = Psi_EnYd0.test), varIC_EnYd0.test = c(EYd = varIC_EnYd0.test),
                      E0Ydn.test = c(EYd = E0Ydn.test))
    }
    # Contrast estimands on the test fold: psi and IC differences, so the CI
    # accounts for the covariance between the two TMLEs.
    if (!is.null(contrast)) {
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYdn.test_i = tmle_objects.EnYdn.test$psi - tmle_objects.contrast.test_i$psi
        varIC_EnYdn.test_i = var(tmle_objects.EnYdn.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYdn.test_i = Psi_EnYdn.test_i, varIC_EnYdn.test_i = varIC_EnYdn.test_i)
        if (!is.null(QAW.fun)) {
          E0Ydn.test_i = mean(unlist(QAW.fun(A = dopt.test, W = W[folds == i,,drop=F])) - unlist(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F])))
          Psi_EnYd0.test_i = tmle_objects.EnYd0.test$psi - tmle_objects.contrast.test_i$psi
          varIC_EnYd0.test_i = var(tmle_objects.EnYd0.test$IC - tmle_objects.contrast.test_i$IC)
          toreturn_contrast = c(Psi_EnYdn.test = Psi_EnYdn.test_i, varIC_EnYdn.test = varIC_EnYdn.test_i,
                                Psi_EnYd0.test = Psi_EnYd0.test_i, varIC_EnYd0.test = varIC_EnYd0.test_i,
                                E0Ydn.test = E0Ydn.test_i)
        }
        return(toreturn_contrast)
      }
      # Append contrast results to each element of toreturn, naming the first
      # entry "EYd" and the rest after the contrast columns.
      contrast_df = apply(contrast, 2, contrast_fun)
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYd", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYdn, CVTMLE
  # CV-TMLE point estimate and variance: average fold-specific psi and
  # IC-variance across folds; Wald 95% CI.
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYdn.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYdn.test)))/n
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYdn.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #EnYd0, CVTMLE
    # Same aggregation for the true rule d0 (simulation only).
    Psi_CV.TMLE0 = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYd0.test)))
    var_CV.TMLE0 = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYd0.test)))/n
    CI_CV.TMLE0 = sapply(1:length(Psi_CV.TMLE0), function(i) Psi_CV.TMLE0[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE0[i]))
    rownames(CI_CV.TMLE0) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
    colnames(CI_CV.TMLE0) = names(Psi_CV.TMLE0)
    EnYd0.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE0, CI_CV.TMLE = CI_CV.TMLE0)
    #E0Ydn, CVTMLE
    E0Ydn.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0Ydn.test)))
    toreturn = list(EnYdn = rbind(EnYdn.nonCVTMLE, EnYdn.CVTMLE),
                    EnYd0 = rbind(EnYd0.nonCVTMLE, EnYd0.CVTMLE),
                    E0Ydn = rbind(E0Ydn.nonCVTMLE, E0Ydn.CVTMLE))
  } else {
    # Real-data output: estimates stacked with the fitted SuperLearner object.
    colnames(EnYdn.CVTMLE) = colnames(EnYdn.nonCVTMLE)
    toreturn = list(EYdopt_estimates = rbind(EnYdn.nonCVTMLE, EnYdn.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
# function that computes EYgstar - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYgstar
#' @aliases EYgstar
#' @title Estimation of E[Ygstar]
#' @description Given a W, A, Y dataset, this function will compute the estimated ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true treatment under the optimal rule and other metrics of evaluating the estimated optimal rule's performance. Then, it will estimate E[Ygstar] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip").
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ygstar] risk use "CV IPCWDR" (for -E[Ygstar] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ygstar] estimates using TMLE); (3) For the upper bound of the CI of -E[Ygstar] use "CV TMLE CI"
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast An integer to contrast Psi = E[Ygstar]-E[Ycontrast] for CV-TMLE. For example, 0 will contrast Psi = E[Ygstar]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#' @param cs_to_try Constants for SL.blip.c
#' @param alphas_to_try Convex combination alphas for SL.blip.alpha
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ygstar] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYgstar_estimates}{Point estimates and confidence intervals for E[Ygstar], using the unadjusted mean outcome for the people who received the optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} or \code{SL.vote} documentation.}
#' }
#'
#' @export
#'
EYgstar = function(W, V, A, Y, g.SL.library, QAW.SL.library, blip.SL.library,
                   metalearner, risk.type,
                   grid.size = 100, VFolds = 10, QAW.fun = NULL,
                   family = NULL, contrast = NULL, cs_to_try = NULL, alphas_to_try = NULL){
  # Estimate E[Ygstar] for a *stochastic* rule gstar (rule.output = "g"):
  # non-CV estimators on the full sample, then CV-TMLE over VFolds folds.
  # Structure parallels EYdopt(); QAW.fun is a simulation-only truth.
  n = length(Y)
  # Default family: binomial when Y is bounded in [0, 1], gaussian otherwise.
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  # Observed range of Y, forwarded as bounds for the TMLE targeting step.
  ab = range(Y)
  #### All things non-CV ####
  # Full-sample fit; rule.output = "g" returns treatment probabilities
  # gstar(1|W) / gstar(0|W) rather than a deterministic rule.
  # NOTE(review): cs_to_try / alphas_to_try are passed positionally here but
  # by name in the CV call below -- confirm they land on the intended
  # parameters of odtr().
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = NULL, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = NULL, family = family, rule.output = "g", cs_to_try, alphas_to_try)
  QAW.reg = SL.odtr$QAW.reg
  g.reg = SL.odtr$g.reg
  gstar1W = SL.odtr$gstar1W
  gstar0W = SL.odtr$gstar0W
  #EnYgstar, non-CVTMLE
  # Non-CV estimators of the mean outcome under the stochastic rule gstar.
  EnYgstar.nonCVTMLE = estimatorsEYgstar_nonCVTMLE(W = W, A = A, Y = Y, gstar1W = gstar1W, gstar0W = gstar0W, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  # Simulation-only: true value of the estimated stochastic rule.
  if (!is.null(QAW.fun)) {
    #E0Ydn, non-CVTMLE
    E0Ygstar.nonCVTMLE = mean(QAW.fun(A = 1, W = W)*gstar1W + QAW.fun(A = 0, W = W)*gstar0W)
    if (!is.null(contrast)) {
      # True difference E[Ygstar] - E[Ycontrast] for each contrast column.
      contrastE0Ygstar_fun = function(contrast_i) {
        contrast_i = contrast_i
        E0Ygstar.nonCVTMLE_i = E0Ygstar.nonCVTMLE - mean(QAW.fun(A = contrast_i, W = W))
        return(E0Ygstar.nonCVTMLE_i)
      }
      contrastE0Ygstar_df = apply(contrast, 2, contrastE0Ygstar_fun)
      E0Ygstar.nonCVTMLE = c(EYgstar = E0Ygstar.nonCVTMLE, contrastE0Ygstar_df)
    }
  }
  #### All things CV ####
  # Random fold labels (fold sizes not exactly balanced).
  folds = sample(1:VFolds, size = n, replace = T)
  # Per-fold worker: re-learn gstar and nuisances on training folds, target
  # with TMLE on held-out fold i.
  CV.TMLE_fun = function(i){
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = NULL,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = NULL, family = family, ab = ab, rule.output = "g", cs_to_try = cs_to_try, alphas_to_try = alphas_to_try)
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    # Stochastic-rule probabilities on the held-out fold.
    gstar1W.test = SL.odtr.train$gstar1W
    gstar0W.test = SL.odtr.train$gstar0W
    gstarAW.test = ifelse(A[folds == i] == 1, gstar1W.test, gstar0W.test)
    # Treatment mechanism and outcome regression evaluated on the test fold.
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)
    Q1W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 1), type = "response")$pred
    Q0W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 0), type = "response")$pred
    QAW.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = A[folds == i]), type = "response")$pred
    # TMLE targeting for a stochastic rule; returns psi and influence curve.
    tmle_objects.EnYgstar.test = tmle.g.fun(A = A[folds == i], Y = Y[folds==i], gstarAW = gstarAW.test, gstar1W = gstar1W.test, gstar0W = gstar0W.test, QAW = QAW.test, Q1W = Q1W.test, Q0W = Q0W.test, gAW = gAW.test, ab = ab)
    Psi_EnYgstar.test = tmle_objects.EnYgstar.test$psi
    varIC_EnYgstar.test = var(tmle_objects.EnYgstar.test$IC)
    toreturn = list(Psi_EnYgstar.test = c(EYgstar = Psi_EnYgstar.test), varIC_EnYgstar.test = c(EYgstar = varIC_EnYgstar.test))
    # Simulation-only: true mean under the fold-specific stochastic rule.
    if (!is.null(QAW.fun)) {
      E0Ygstar.test = mean(QAW.fun(A = 1, W = W[folds == i,,drop=F])*gstar1W.test + QAW.fun(A = 0, W = W[folds == i,,drop=F])*gstar0W.test)
      toreturn = list(Psi_EnYgstar.test = c(EYgstar = Psi_EnYgstar.test), varIC_EnYgstar.test = c(EYgstar = varIC_EnYgstar.test),
                      E0Ygstar.test = c(EYgstar = E0Ygstar.test))
    }
    # Contrast estimands: psi and IC differences vs each contrast rule.
    if (!is.null(contrast)) {
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYgstar.test_i = tmle_objects.EnYgstar.test$psi - tmle_objects.contrast.test_i$psi
        varIC_EnYgstar.test_i = var(tmle_objects.EnYgstar.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYgstar.test_i = Psi_EnYgstar.test_i, varIC_EnYgstar.test_i = varIC_EnYgstar.test_i)
        if (!is.null(QAW.fun)) {
          E0Ygstar.test_i = mean(QAW.fun(A = 1, W = W[folds == i,,drop=F])*gstar1W.test + QAW.fun(A = 0, W = W[folds == i,,drop=F])*gstar0W.test) - mean(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F]))
          toreturn_contrast = c(Psi_EnYgstar.test = Psi_EnYgstar.test_i, varIC_EnYgstar.test = varIC_EnYgstar.test_i,
                                E0Ygstar.test = E0Ygstar.test_i)
        }
        return(toreturn_contrast)
      }
      # Append contrast results and name entries "EYgstar" + contrast columns.
      contrast_df = apply(contrast, 2, contrast_fun)
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYgstar", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYgstar, CVTMLE
  # Aggregate fold-specific psi / IC-variance; Wald 95% CI.
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYgstar.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYgstar.test)))/n
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYgstar.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #E0Ygstar, CVTMLE
    E0Ygstar.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0Ygstar.test)))
    # regret
    # NOTE(review): d0 assigns treatment when the true blip is <= 0; elsewhere
    # (EYdopt via dopt.fun) the rule is built from the blip directly -- confirm
    # this sign convention is intended before relying on `regret`.
    d0 = as.numeric(QAW.fun(1,W) - QAW.fun(0,W) <= 0)
    regret = mean(QAW.fun(A = 1, W)*SL.odtr$gstar1W + QAW.fun(A = 0, W)*SL.odtr$gstar0W) - mean(QAW.fun(d0,W))
    toreturn = list(EnYgstar = rbind(EnYgstar.nonCVTMLE, EnYgstar.CVTMLE),
                    E0Ygstar = rbind(E0Ygstar.nonCVTMLE, E0Ygstar.CVTMLE),
                    SL.info = data.frame(regret = regret, param.type = SL.odtr$param.type, param = SL.odtr$param, coef = t(SL.odtr$SL.fit$coef)))
  } else {
    # Real-data output: estimates stacked with the fitted SuperLearner object.
    toreturn = list(EYdopt_estimates = rbind(EnYgstar.nonCVTMLE, EnYgstar.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
# function that computes EYgRC - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYgRC
#' @aliases EYgRC
#' @title Estimation of E[YgRC]
#' @description Given a W, A, Y dataset, this function will compute the estimated resource constrained (RC) ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true (stochastic) treatment under the optimal rule and other metrics of evaluating the estimated rule's performance. Then, it will estimate E[YgRC] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip").
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ygstar] risk use "CV IPCWDR" (for -E[Ygstar] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ygstar] estimates using TMLE); (3) For the upper bound of the CI of -E[Ygstar] use "CV TMLE CI"
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param kappa For ODTR with resource constraints, kappa is the proportion of people in the population who are allowed to receive treatment. Default is \code{NULL}.
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast An integer to contrast Psi = E[Ygstar]-E[Ycontrast] for CV-TMLE. For example, 0 will contrast Psi = E[Ygstar]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ygstar] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYgRC_estimates}{Point estimates and confidence intervals for E[YgRC], using the unadjusted mean outcome for the people who received the (stochastic) resource-constrained (RC) optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} documentation.}
#' }
#'
#' @export
#'
EYgRC = function(W, V, A, Y, g.SL.library = "SL.mean", QAW.SL.library, blip.SL.library,
                 metalearner = "blip", risk.type = "CV TMLE", kappa,
                 grid.size = 100, VFolds = 10, QAW.fun = NULL,
                 family = NULL, contrast = NULL){
  # Estimate E[YgRC], the mean outcome under a resource-constrained (RC)
  # stochastic rule where at most a proportion kappa of the population may be
  # treated (rule.output = "rc"). Structure parallels EYdopt()/EYgstar():
  # non-CV estimators on the full sample, then CV-TMLE over VFolds folds.
  n = length(Y)
  # Default family: binomial when Y is bounded in [0, 1], gaussian otherwise.
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  # Observed range of Y, forwarded as bounds for the TMLE targeting step.
  ab = range(Y)
  #### All things non-CV ####
  # Full-sample fit; rc.out carries Prd.is.1 (treatment probabilities under
  # the RC rule) and tauP (the blip threshold implied by kappa).
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = NULL, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = kappa, family = family, rule.output = "rc", cs_to_try = NULL, alphas_to_try = NULL)
  QAW.reg = SL.odtr$QAW.reg
  g.reg = SL.odtr$g.reg
  rc.out = SL.odtr$rc.out
  #EnYgRC, non-CVTMLE
  # Non-CV estimators of the mean outcome under the RC rule.
  EnYgRC.nonCVTMLE = estimatorsEYgRC_nonCVTMLE(W = W, A = A, Y = Y, rc.out = rc.out, kappa = kappa, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  # Simulation-only: true value of the estimated RC rule.
  if (!is.null(QAW.fun)) {
    #E0Ydn, non-CVTMLE
    E0YgRC.nonCVTMLE = mean(unlist(QAW.fun(A = 1, W = W)*rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W = W)*(1-rc.out$Prd.is.1)))
    if (!is.null(contrast)) {
      # True difference E[YgRC] - E[Ycontrast] for each contrast column.
      contrastE0YgRC_fun = function(contrast_i) {
        E0YgRC.nonCVTMLE_i = E0YgRC.nonCVTMLE - mean(unlist(QAW.fun(A = contrast_i, W = W)))
        return(E0YgRC.nonCVTMLE_i)
      }
      contrastE0YgRC_df = apply(contrast, 2, contrastE0YgRC_fun)
      E0YgRC.nonCVTMLE = c(EYgRC = E0YgRC.nonCVTMLE, contrastE0YgRC_df)
    }
  }
  #### All things CV ####
  # Random fold labels (fold sizes not exactly balanced).
  folds = sample(1:VFolds, size = n, replace = T)
  # Per-fold worker: re-learn the RC rule and nuisances on training folds,
  # target with TMLE on held-out fold i.
  CV.TMLE_fun = function(i){
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = NULL,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = kappa, family = family, ab = ab, rule.output = "rc", cs_to_try = NULL, alphas_to_try = NULL)
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    # RC-rule treatment probabilities on the held-out fold.
    rc.out.test = SL.odtr.train$rc.out
    Prd.is.1.test = rc.out.test$Prd.is.1
    Prd.is.0.test = 1 - rc.out.test$Prd.is.1
    Prd.is.A.test = ifelse(A[folds == i] == 1, Prd.is.1.test, Prd.is.0.test)
    # Treatment mechanism and outcome regression evaluated on the test fold.
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)
    Q1W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 1), type = "response")$pred
    Q0W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 0), type = "response")$pred
    QAW.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = A[folds == i]), type = "response")$pred
    # TMLE targeting for the RC rule (tauP/kappa enter the targeting step).
    tmle_objects.EnYgRC.test = tmle.rc.fun(A = A[folds == i], Y = Y[folds==i], gstarAW = Prd.is.A.test, gstar1W = Prd.is.1.test, gstar0W = Prd.is.0.test, QAW = QAW.test, Q1W = Q1W.test, Q0W = Q0W.test, gAW = gAW.test, ab = ab, tauP = rc.out.test$tauP, kappa = kappa)
    Psi_EnYgRC.test = tmle_objects.EnYgRC.test$psi
    varIC_EnYgRC.test = var(tmle_objects.EnYgRC.test$IC)
    toreturn = list(Psi_EnYgRC.test = c(EYgRC = Psi_EnYgRC.test), varIC_EnYgRC.test = c(EYgRC = varIC_EnYgRC.test))
    # Simulation-only: true mean under the fold-specific RC rule.
    if (!is.null(QAW.fun)) {
      E0YgRC.test = mean(unlist(QAW.fun(A = 1, W = W[folds == i,,drop=F])*Prd.is.1.test) + unlist(QAW.fun(A = 0, W = W[folds == i,,drop=F])*Prd.is.0.test))
      toreturn = list(Psi_EnYgRC.test = c(EYgRC = Psi_EnYgRC.test), varIC_EnYgRC.test = c(EYgRC = varIC_EnYgRC.test),
                      E0YgRC.test = c(EYgRC = E0YgRC.test))
    }
    # Contrast estimands: psi and IC differences vs each contrast rule.
    if (!is.null(contrast)) {
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYgRC.test_i = tmle_objects.EnYgRC.test$psi - tmle_objects.contrast.test_i$psi
        varIC_EnYgRC.test_i = var(tmle_objects.EnYgRC.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYgRC.test_i = Psi_EnYgRC.test_i, varIC_EnYgRC.test_i = varIC_EnYgRC.test_i)
        if (!is.null(QAW.fun)) {
          E0YgRC.test_i = mean(unlist(QAW.fun(A = 1, W = W[folds == i,,drop=F])*Prd.is.1.test) + unlist(QAW.fun(A = 0, W = W[folds == i,,drop=F])*Prd.is.0.test)) - mean(unlist(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F])))
          toreturn_contrast = c(Psi_EnYgRC.test = Psi_EnYgRC.test_i, varIC_EnYgRC.test = varIC_EnYgRC.test_i,
                                E0YgRC.test = E0YgRC.test_i)
        }
        return(toreturn_contrast)
      }
      # Append contrast results and name entries "EYgRC" + contrast columns.
      contrast_df = apply(contrast, 2, contrast_fun)
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYgRC", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYgRC, CVTMLE
  # Aggregate fold-specific psi / IC-variance; Wald 95% CI.
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYgRC.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYgRC.test)))/n
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYgRC.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #E0YgRC, CVTMLE
    E0YgRC.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0YgRC.test)))
    # regret
    # True RC rule from the true blip under the same kappa constraint;
    # regret = (true value of estimated RC rule) - (true value of true RC rule).
    true_rc.out = dopt.fun(blip = unlist(QAW.fun(1,W) - QAW.fun(0,W)), kappa = kappa)
    regret = mean(unlist(QAW.fun(A = 1, W)*SL.odtr$rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W)*(1 - SL.odtr$rc.out$Prd.is.1))) - mean(unlist(QAW.fun(A = 1, W)*true_rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W)*(1 - true_rc.out$Prd.is.1)))
    # True mean under estimated optimal rule using true QAW
    EYdn_QAWHat = mean(unlist(QAW.fun(A = 1, W = W)*rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W = W)*(1-rc.out$Prd.is.1)))
    toreturn = list(EnYgRC = rbind(EnYgRC.nonCVTMLE, EnYgRC.CVTMLE),
                    E0YgRC = rbind(E0YgRC.nonCVTMLE, E0YgRC.CVTMLE),
                    SL.info = data.frame(EYdn_QAWHat = EYdn_QAWHat,
                                         true_mean_Prd.is.1 = mean(true_rc.out$Prd.is.1),
                                         est_mean_Prd.is.1 = mean(SL.odtr$rc.out$Prd.is.1),
                                         regret = regret,
                                         true_tauP = true_rc.out$tauP,
                                         est_tauP = SL.odtr$rc.out$tauP,
                                         coef = t(SL.odtr$SL.fit$coef)))
  } else {
    # Real-data output: estimates stacked with the fitted SuperLearner object.
    toreturn = list(EYdopt_estimates = rbind(EnYgRC.nonCVTMLE, EnYgRC.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
|
/R/5valueodtr.R
|
no_license
|
lmmontoya/SL.ODTR
|
R
| false
| false
| 33,931
|
r
|
# function that computes EYdopt - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYdopt
#' @aliases EYdopt
#' @title Estimation of E[Ydopt]
#' @description Given a W, A, Y dataset, this function will compute the estimated ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true treatment under the optimal rule and other metrics of evaluating the estimated optimal rule's performance. Then, it will estimate E[Ydopt] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip"), vote-based SuperLearner ("vote"). Note that if metalearner is "vote" then cannot put in kappa.
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ydopt] risk use "CV IPCWDR" (for -E[Ydopt] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ydopt] estimates using TMLE); (3) For the upper bound of the CI of -E[Ydopt] use "CV TMLE CI"
#' @param dopt.SL.library SuperLearner library for estimating dopt directly. Default is \code{NULL}. Could be "DonV", "Qlearn", "OWL", "EARL", "optclass", "RWL", "treatall", "treatnone". Could also be "all" for all algorithms.
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast A dim = (n, num contrasts) matrix or dataframe (with columns preferably named) to contrast Psi = E[Ydopt]-E[Ycontrast] for CV-TMLE. For example, contrast = data.frame("EY0" = rep(0,n)) will contrast Psi = E[Ydopt]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ydopt] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYdopt_estimates}{Point estimates and confidence intervals for E[Ydopt], using the unadjusted mean outcome for the people who received the optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} or \code{SL.vote} documentation.}
#' }
#'
#' @references
#' van der Laan, Mark J., and Alexander R. Luedtke. "Targeted learning of the mean outcome under an optimal dynamic treatment rule." \emph{Journal of causal inference} 3.1 (2015): 61-95.
#'
#' Luedtke, Alexander R., and Mark J. van der Laan. "Super-learning of an optimal dynamic treatment rule." \emph{The international journal of biostatistics} 12.1 (2016): 305-332.
#'
#' Luedtke, Alexander R., and Mark J. van der Laan. "Optimal individualized treatments in resource-limited settings." \emph{The international journal of biostatistics} 12.1 (2016): 283-303.
#'
#' Coyle, J.R. (2017). Jeremy Coyle, “Computational Considerations for Targeted Learning” PhD diss., University of California, Berkeley 2017 \url{https://escholarship.org/uc/item/9kh0b9vm}.
#'
#' @export
#'
#' @examples
#' ## Example
#' library(SuperLearner)
#' library(hitandrun)
#' ObsData = subset(DGP_bin_simple(1000), select = -c(A_star, Y_star))
#' W = subset(ObsData, select = -c(A,Y))
#' V = W
#' A = ObsData$A
#' Y = ObsData$Y
#'
#' # E[Ydopt] using blip-based estimate of ODTR with risk function CV-TMLE
#' EYdopt(W = W, A = A, Y = Y, V = W, blip.SL.library = "SL.blip.HTEepi", g.SL.library = "SL.mean", QAW.SL.library = "SL.QAW.HTEepi", risk.type = "CV TMLE", metalearner = 'blip')
EYdopt = function(W, V, A, Y, g.SL.library = "SL.mean", QAW.SL.library, blip.SL.library, dopt.SL.library = NULL,
                  metalearner = "blip", risk.type = "CV TMLE",
                  grid.size = 100, VFolds = 10, QAW.fun = NULL,
                  family = NULL, contrast = NULL){
  # Estimate E[Ydopt], the mean outcome under the SuperLearner-estimated optimal
  # dynamic treatment rule (ODTR). Returns both a non-cross-validated estimate
  # (via estimatorsEYd_nonCVTMLE) and a cross-validated TMLE. If the true outcome
  # regression QAW.fun is supplied (e.g. in simulations), also evaluates the
  # estimated rule against the truth and against the true optimal rule.
  n = length(Y)
  # Default family: binomial if Y lies in [0,1], otherwise gaussian
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  ab = range(Y)  # outcome bounds used for scaling inside the TMLE targeting steps
  #### All things non-CV ####
  # Fit the ODTR on the full data; rule.output = "d" requests a deterministic rule
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = dopt.SL.library, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = NULL, family = family, rule.output = "d")
  QAW.reg = SL.odtr$QAW.reg  # fitted outcome regression E[Y|A,W]
  g.reg = SL.odtr$g.reg      # fitted treatment mechanism g(A|W)
  dopt = SL.odtr$dopt        # estimated optimal treatment assignment, one per subject
  #EnYdn, non-CVTMLE: estimators of E[Y under the estimated rule] on the full sample
  EnYdn.nonCVTMLE = estimatorsEYd_nonCVTMLE(W = W, A = A, Y = Y, d = dopt, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  if (!is.null(QAW.fun)) {
    # True optimal rule derived from the true blip QAW.fun(1,W) - QAW.fun(0,W)
    dopt0 = dopt.fun(blip = QAW.fun(A = 1, W = W) - QAW.fun(A = 0, W = W), kappa = NULL)
    #EnYd0, non-CVTMLE: estimated mean outcome under the TRUE optimal rule
    EnYd0.nonCVTMLE = estimatorsEYd_nonCVTMLE(W = W, A = A, Y = Y, d = dopt0, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
    #E0Ydn, non-CVTMLE: TRUE mean outcome under the ESTIMATED rule
    E0Ydn.nonCVTMLE = mean(QAW.fun(A = dopt, W = W))
    if (!is.null(contrast)) {
      # Append, for each contrast rule (column of `contrast`), the true-mean difference
      contrastE0Ydn_fun = function(contrast_i) {
        contrast_i = contrast_i
        E0Ydn.nonCVTMLE_i = E0Ydn.nonCVTMLE - mean(QAW.fun(A = contrast_i, W = W))
        return(E0Ydn.nonCVTMLE_i)
      }
      contrastE0Ydn_df = apply(contrast, 2, contrastE0Ydn_fun)
      E0Ydn.nonCVTMLE = c(EYd = E0Ydn.nonCVTMLE, contrastE0Ydn_df)
    }
  }
  #### All things CV ####
  # Random fold assignment for CV-TMLE (sampled with replacement, so fold sizes vary)
  folds = sample(1:VFolds, size = n, replace = T)
  CV.TMLE_fun = function(i){
    # Train the ODTR on all folds except i; evaluate (target) on held-out fold i
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = dopt.SL.library,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = NULL, family = family, ab = ab, rule.output = "d")
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    dopt.test = SL.odtr.train$dopt  # rule learned on training folds, applied to fold i (via newV)
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)  # g evaluated at observed A
    Qdopt.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = dopt.test), type = "response")$pred
    # Fold-specific TMLE of E[Y under the fold-specific estimated rule]
    tmle_objects.EnYdn.test = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = dopt.test, Qd = Qdopt.test, gAW = gAW.test, ab = ab)
    Psi_EnYdn.test = tmle_objects.EnYdn.test$psi
    varIC_EnYdn.test = var(tmle_objects.EnYdn.test$IC)
    toreturn = list(Psi_EnYdn.test = c(EYd = Psi_EnYdn.test), varIC_EnYdn.test = c(EYd = varIC_EnYdn.test))
    if (!is.null(QAW.fun)) {
      # True mean on fold i under the fold-specific estimated rule
      E0Ydn.test = mean(QAW.fun(A = dopt.test, W = W[folds == i,,drop=F]))
      # Fold-specific TMLE under the TRUE rule dopt0 (computed above from QAW.fun)
      Qdopt0.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = dopt0[folds == i]), type = "response")$pred
      tmle_objects.EnYd0.test = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = dopt0[folds == i], Qd = Qdopt0.test, gAW = gAW.test, ab = ab)
      Psi_EnYd0.test = tmle_objects.EnYd0.test$psi
      varIC_EnYd0.test = var(tmle_objects.EnYd0.test$IC)
      toreturn = list(Psi_EnYdn.test = c(EYd = Psi_EnYdn.test), varIC_EnYdn.test = c(EYd = varIC_EnYdn.test),
                      Psi_EnYd0.test = c(EYd = Psi_EnYd0.test), varIC_EnYd0.test = c(EYd = varIC_EnYd0.test),
                      E0Ydn.test = c(EYd = E0Ydn.test))
    }
    if (!is.null(contrast)) {
      # For each contrast rule, compute psi and IC differences vs. that rule's TMLE
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYdn.test_i = tmle_objects.EnYdn.test$psi - tmle_objects.contrast.test_i$psi
        # Variance of the IC of the DIFFERENCE (accounts for correlation of the two ICs)
        varIC_EnYdn.test_i = var(tmle_objects.EnYdn.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYdn.test_i = Psi_EnYdn.test_i, varIC_EnYdn.test_i = varIC_EnYdn.test_i)
        if (!is.null(QAW.fun)) {
          E0Ydn.test_i = mean(unlist(QAW.fun(A = dopt.test, W = W[folds == i,,drop=F])) - unlist(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F])))
          Psi_EnYd0.test_i = tmle_objects.EnYd0.test$psi - tmle_objects.contrast.test_i$psi
          varIC_EnYd0.test_i = var(tmle_objects.EnYd0.test$IC - tmle_objects.contrast.test_i$IC)
          toreturn_contrast = c(Psi_EnYdn.test = Psi_EnYdn.test_i, varIC_EnYdn.test = varIC_EnYdn.test_i,
                                Psi_EnYd0.test = Psi_EnYd0.test_i, varIC_EnYd0.test = varIC_EnYd0.test_i,
                                E0Ydn.test = E0Ydn.test_i)
        }
        return(toreturn_contrast)
      }
      contrast_df = apply(contrast, 2, contrast_fun)
      # Append one contrasted value per element of `toreturn`, preserving element names
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYd", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYdn, CVTMLE: pool fold-specific TMLEs (average psi; average IC variance / n)
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYdn.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYdn.test)))/n
  # 95% Wald-style confidence interval
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYdn.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #EnYd0, CVTMLE: pooled CV-TMLE under the true optimal rule
    Psi_CV.TMLE0 = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYd0.test)))
    var_CV.TMLE0 = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYd0.test)))/n
    CI_CV.TMLE0 = sapply(1:length(Psi_CV.TMLE0), function(i) Psi_CV.TMLE0[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE0[i]))
    rownames(CI_CV.TMLE0) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
    colnames(CI_CV.TMLE0) = names(Psi_CV.TMLE0)
    EnYd0.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE0, CI_CV.TMLE = CI_CV.TMLE0)
    #E0Ydn, CVTMLE: average of fold-specific true means under the estimated rules
    E0Ydn.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0Ydn.test)))
    toreturn = list(EnYdn = rbind(EnYdn.nonCVTMLE, EnYdn.CVTMLE),
                    EnYd0 = rbind(EnYd0.nonCVTMLE, EnYd0.CVTMLE),
                    E0Ydn = rbind(E0Ydn.nonCVTMLE, E0Ydn.CVTMLE))
  } else {
    colnames(EnYdn.CVTMLE) = colnames(EnYdn.nonCVTMLE)
    toreturn = list(EYdopt_estimates = rbind(EnYdn.nonCVTMLE, EnYdn.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
# function that computes EYgstar - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYgstar
#' @aliases EYgstar
#' @title Estimation of E[Ygstar]
#' @description Given a W, A, Y dataset, this function will compute the estimated ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true treatment under the optimal rule and other metrics for evaluating the estimated optimal rule's performance. Then, it will estimate E[Ygstar] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip").
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ygstar] risk use "CV IPCWDR" (for -E[Ygstar] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ygstar] estimates using TMLE); (3) For the upper bound of the CI of -E[Ygstar] use "CV TMLE CI"
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast An integer to contrast Psi = E[Ygstar]-E[Ycontrast] for CV-TMLE. For example, 0 will contrast Psi = E[Ygstar]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#' @param cs_to_try Constants for SL.blip.c
#' @param alphas_to_try Convex combination alphas for SL.blip.alpha
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ygstar] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYgstar_estimates}{Point estimates and confidence intervals for E[Ygstar], using the unadjusted mean outcome for the people who received the optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} or \code{SL.vote} documentation.}
#' }
#'
#' @export
#'
EYgstar = function(W, V, A, Y, g.SL.library, QAW.SL.library, blip.SL.library,
                   metalearner, risk.type,
                   grid.size = 100, VFolds = 10, QAW.fun = NULL,
                   family = NULL, contrast = NULL, cs_to_try = NULL, alphas_to_try = NULL){
  # Estimate E[Ygstar], the mean outcome under an estimated STOCHASTIC optimal
  # rule gstar (rule.output = "g"): gstar1W/gstar0W are the probabilities of
  # assigning A = 1 / A = 0 given covariates. Produces a non-CV estimate and a
  # CV-TMLE; if the true outcome regression QAW.fun is supplied, also evaluates
  # the rule against the truth (including a regret summary).
  n = length(Y)
  # Default family: binomial if Y lies in [0,1], otherwise gaussian
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  ab = range(Y)  # outcome bounds used for scaling inside TMLE
  #### All things non-CV ####
  # Fit the stochastic ODTR on the full data (cs_to_try/alphas_to_try are passed positionally)
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = NULL, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = NULL, family = family, rule.output = "g", cs_to_try, alphas_to_try)
  QAW.reg = SL.odtr$QAW.reg    # fitted outcome regression E[Y|A,W]
  g.reg = SL.odtr$g.reg        # fitted treatment mechanism g(A|W)
  gstar1W = SL.odtr$gstar1W    # P(rule assigns A = 1 | V)
  gstar0W = SL.odtr$gstar0W    # P(rule assigns A = 0 | V)
  #EnYgstar, non-CVTMLE: estimators of E[Y under gstar] on the full sample
  EnYgstar.nonCVTMLE = estimatorsEYgstar_nonCVTMLE(W = W, A = A, Y = Y, gstar1W = gstar1W, gstar0W = gstar0W, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  if (!is.null(QAW.fun)) {
    #E0Ydn, non-CVTMLE: TRUE mean outcome under the estimated stochastic rule
    E0Ygstar.nonCVTMLE = mean(QAW.fun(A = 1, W = W)*gstar1W + QAW.fun(A = 0, W = W)*gstar0W)
    if (!is.null(contrast)) {
      # Append true-mean differences vs. each contrast rule (columns of `contrast`)
      contrastE0Ygstar_fun = function(contrast_i) {
        contrast_i = contrast_i
        E0Ygstar.nonCVTMLE_i = E0Ygstar.nonCVTMLE - mean(QAW.fun(A = contrast_i, W = W))
        return(E0Ygstar.nonCVTMLE_i)
      }
      contrastE0Ygstar_df = apply(contrast, 2, contrastE0Ygstar_fun)
      E0Ygstar.nonCVTMLE = c(EYgstar = E0Ygstar.nonCVTMLE, contrastE0Ygstar_df)
    }
  }
  #### All things CV ####
  # Random fold assignment for CV-TMLE (sampled with replacement, so fold sizes vary)
  folds = sample(1:VFolds, size = n, replace = T)
  CV.TMLE_fun = function(i){
    # Train the stochastic rule on folds != i; evaluate on held-out fold i
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = NULL,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = NULL, family = family, ab = ab, rule.output = "g", cs_to_try = cs_to_try, alphas_to_try = alphas_to_try)
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    gstar1W.test = SL.odtr.train$gstar1W  # rule probabilities for fold i (via newV)
    gstar0W.test = SL.odtr.train$gstar0W
    gstarAW.test = ifelse(A[folds == i] == 1, gstar1W.test, gstar0W.test)  # at observed A
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)
    # Outcome regression predictions on fold i at A = 1, A = 0, and observed A
    Q1W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 1), type = "response")$pred
    Q0W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 0), type = "response")$pred
    QAW.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = A[folds == i]), type = "response")$pred
    # Fold-specific TMLE for the mean under the stochastic rule
    tmle_objects.EnYgstar.test = tmle.g.fun(A = A[folds == i], Y = Y[folds==i], gstarAW = gstarAW.test, gstar1W = gstar1W.test, gstar0W = gstar0W.test, QAW = QAW.test, Q1W = Q1W.test, Q0W = Q0W.test, gAW = gAW.test, ab = ab)
    Psi_EnYgstar.test = tmle_objects.EnYgstar.test$psi
    varIC_EnYgstar.test = var(tmle_objects.EnYgstar.test$IC)
    toreturn = list(Psi_EnYgstar.test = c(EYgstar = Psi_EnYgstar.test), varIC_EnYgstar.test = c(EYgstar = varIC_EnYgstar.test))
    if (!is.null(QAW.fun)) {
      # True mean on fold i under the fold-specific estimated stochastic rule
      E0Ygstar.test = mean(QAW.fun(A = 1, W = W[folds == i,,drop=F])*gstar1W.test + QAW.fun(A = 0, W = W[folds == i,,drop=F])*gstar0W.test)
      toreturn = list(Psi_EnYgstar.test = c(EYgstar = Psi_EnYgstar.test), varIC_EnYgstar.test = c(EYgstar = varIC_EnYgstar.test),
                      E0Ygstar.test = c(EYgstar = E0Ygstar.test))
    }
    if (!is.null(contrast)) {
      # For each (deterministic) contrast rule, psi and IC differences vs. its TMLE
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYgstar.test_i = tmle_objects.EnYgstar.test$psi - tmle_objects.contrast.test_i$psi
        # Variance of the IC of the DIFFERENCE (accounts for correlation of the two ICs)
        varIC_EnYgstar.test_i = var(tmle_objects.EnYgstar.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYgstar.test_i = Psi_EnYgstar.test_i, varIC_EnYgstar.test_i = varIC_EnYgstar.test_i)
        if (!is.null(QAW.fun)) {
          E0Ygstar.test_i = mean(QAW.fun(A = 1, W = W[folds == i,,drop=F])*gstar1W.test + QAW.fun(A = 0, W = W[folds == i,,drop=F])*gstar0W.test) - mean(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F]))
          toreturn_contrast = c(Psi_EnYgstar.test = Psi_EnYgstar.test_i, varIC_EnYgstar.test = varIC_EnYgstar.test_i,
                                E0Ygstar.test = E0Ygstar.test_i)
        }
        return(toreturn_contrast)
      }
      contrast_df = apply(contrast, 2, contrast_fun)
      # Append one contrasted value per element of `toreturn`, preserving names
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYgstar", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYgstar, CVTMLE: pool fold-specific TMLEs (average psi; average IC variance / n)
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYgstar.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYgstar.test)))/n
  # 95% Wald-style confidence interval
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYgstar.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #E0Ygstar, CVTMLE: average of fold-specific true means under the estimated rules
    E0Ygstar.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0Ygstar.test)))
    # regret: true mean under the estimated stochastic rule minus true mean under d0
    # NOTE(review): d0 assigns treatment when the true blip is <= 0; confirm this
    # sign convention matches dopt.fun used elsewhere in this file.
    d0 = as.numeric(QAW.fun(1,W) - QAW.fun(0,W) <= 0)
    regret = mean(QAW.fun(A = 1, W)*SL.odtr$gstar1W + QAW.fun(A = 0, W)*SL.odtr$gstar0W) - mean(QAW.fun(d0,W))
    toreturn = list(EnYgstar = rbind(EnYgstar.nonCVTMLE, EnYgstar.CVTMLE),
                    E0Ygstar = rbind(E0Ygstar.nonCVTMLE, E0Ygstar.CVTMLE),
                    SL.info = data.frame(regret = regret, param.type = SL.odtr$param.type, param = SL.odtr$param, coef = t(SL.odtr$SL.fit$coef)))
  } else {
    toreturn = list(EYdopt_estimates = rbind(EnYgstar.nonCVTMLE, EnYgstar.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
# function that computes EYgRC - unadj, TMLE, IPTW, gcomp, CV-TMLE
#' @name EYgRC
#' @aliases EYgstar
#' @title Estimation of E[YgRC]
#' @description Given a W, A, Y dataset, this function will compute the estimated resource-constrained (RC) ODTR using SuperLearner. If a Qbar function is provided that computes the true E[Y|A,W] (e.g., if simulating), the function will also return the true (stochastic) treatment under the optimal rule and other metrics for evaluating the estimated rule's performance. Then, it will estimate E[YgRC] using g-computation, IPTW, IPTW-DR, TMLE, and CV-TMLE. Follows the framework of Luedtke and van der Laan, 2015 and 2016.
#'
#' @param W Data frame of observed baseline covariates
#' @param V Data frame of observed baseline covariates (subset of W) used to design the ODTR
#' @param A Vector of treatment
#' @param Y Vector of outcome (continuous or binary)
#' @param metalearner Discrete ("discrete"), blip-based ("blip").
#' @param g.SL.library SuperLearner library for estimating txt mechanism
#' @param QAW.SL.library SuperLearner library for estimating outcome regression
#' @param blip.SL.library SuperLearner library for estimating the blip
#' @param risk.type Risk type in order to pick optimal combination of coefficients to combine the candidate algorithms. For (1) MSE risk use "CV MSE"; for (2) -E[Ygstar] risk use "CV IPCWDR" (for -E[Ygstar] estimated using double-robust IPTW) or "CV TMLE" (for -E[Ygstar] estimates using TMLE); (3) For the upper bound of the CI of -E[Ygstar] use "CV TMLE CI"
#' @param QAW.fun True outcome regression E[Y|A,W]. Useful for simulations. Default is \code{NULL}.
#' @param VFolds Number of folds to use in cross-validation. Default is 10.
#' @param grid.size Grid size for \code{\link[hitandrun:simplex.sample]{simplex.sample()}} function to create possible combinations of coefficients
#' @param kappa For ODTR with resource constraints, kappa is the proportion of people in the population who are allowed to receive treatment. Default is \code{NULL}.
#' @param family either "gaussian" or "binomial". Default is null, if outcome is between 0 and 1 it will change to binomial, otherwise gaussian
#' @param contrast An integer to contrast Psi = E[Ygstar]-E[Ycontrast] for CV-TMLE. For example, 0 will contrast Psi = E[Ygstar]-E[Y0]. Default is \code{NULL}.
#' @param odtr.obj An object from the odtr function that estimates the odtr.
#'
#' @importFrom stats predict var qnorm
#' @import SuperLearner
#'
#' @return If the true Qbar function is specified, the output will be a vector of point estimates of E[Ygstar] and their respective confidence intervals. This will be for both the estimated optimal rule and the true optimal rule. Performance results on the optimal rule will also be output: proportion of people treated under ODTR, proportion of times the estimated rule matches the optimal rule, the mean outcome under the estimated optimal rule under the true mean outcome function, and the mean outcome under the estimated optimal rule under the sample-specific true mean outcome.
#'
#' If the true Qbar is not specified, return:
#' \describe{
#' \item{EYgRC_estimates}{Point estimates and confidence intervals for E[YgRC], using the unadjusted mean outcome for the people who received the (stochastic) resource-constrained (RC) optimal rule, g-computation, IPTW, IPTW-DR, TMLE}
#' \item{SL.odtr}{SuperLearner list. See \code{SL.blip} documentation.}
#' }
#'
#' @export
#'
EYgRC = function(W, V, A, Y, g.SL.library = "SL.mean", QAW.SL.library, blip.SL.library,
                 metalearner = "blip", risk.type = "CV TMLE", kappa,
                 grid.size = 100, VFolds = 10, QAW.fun = NULL,
                 family = NULL, contrast = NULL){
  # Estimate E[YgRC], the mean outcome under an estimated RESOURCE-CONSTRAINED
  # (stochastic) optimal rule, where at most a proportion kappa of the population
  # may be treated (rule.output = "rc"). Produces a non-CV estimate and a CV-TMLE;
  # if the true outcome regression QAW.fun is supplied, also evaluates the rule
  # against the truth (including regret vs. the true resource-constrained rule).
  n = length(Y)
  # Default family: binomial if Y lies in [0,1], otherwise gaussian
  if (is.null(family)) { family = ifelse(max(Y) <= 1 & min(Y) >= 0, "binomial", "gaussian") }
  ab = range(Y)  # outcome bounds used for scaling inside TMLE
  #### All things non-CV ####
  # Fit the resource-constrained ODTR on the full data
  SL.odtr = odtr(V=V, W=W, A=A, Y=Y, ab = ab, g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library,
                 dopt.SL.library = NULL, metalearner = metalearner,
                 risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL, newV = NULL,
                 kappa = kappa, family = family, rule.output = "rc", cs_to_try = NULL, alphas_to_try = NULL)
  QAW.reg = SL.odtr$QAW.reg  # fitted outcome regression E[Y|A,W]
  g.reg = SL.odtr$g.reg      # fitted treatment mechanism g(A|W)
  rc.out = SL.odtr$rc.out    # resource-constrained rule: Prd.is.1 (treat prob.) and tauP (blip cutoff)
  #EnYgRC, non-CVTMLE: estimators of E[Y under the RC rule] on the full sample
  EnYgRC.nonCVTMLE = estimatorsEYgRC_nonCVTMLE(W = W, A = A, Y = Y, rc.out = rc.out, kappa = kappa, QAW.reg = QAW.reg, g.reg = g.reg, ab = ab, contrast = contrast)
  if (!is.null(QAW.fun)) {
    #E0Ydn, non-CVTMLE: TRUE mean outcome under the estimated RC rule
    E0YgRC.nonCVTMLE = mean(unlist(QAW.fun(A = 1, W = W)*rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W = W)*(1-rc.out$Prd.is.1)))
    if (!is.null(contrast)) {
      # Append true-mean differences vs. each contrast rule (columns of `contrast`)
      contrastE0YgRC_fun = function(contrast_i) {
        E0YgRC.nonCVTMLE_i = E0YgRC.nonCVTMLE - mean(unlist(QAW.fun(A = contrast_i, W = W)))
        return(E0YgRC.nonCVTMLE_i)
      }
      contrastE0YgRC_df = apply(contrast, 2, contrastE0YgRC_fun)
      E0YgRC.nonCVTMLE = c(EYgRC = E0YgRC.nonCVTMLE, contrastE0YgRC_df)
    }
  }
  #### All things CV ####
  # Random fold assignment for CV-TMLE (sampled with replacement, so fold sizes vary)
  folds = sample(1:VFolds, size = n, replace = T)
  CV.TMLE_fun = function(i){
    # Train the RC rule on folds != i; evaluate on held-out fold i
    SL.odtr.train = odtr(V = V[folds!=i,,drop=F], W = W[folds!=i,,drop=F], A = A[folds!=i], Y = Y[folds!=i], newV = V[folds==i,,drop=F],
                         g.SL.library = g.SL.library, QAW.SL.library = QAW.SL.library, blip.SL.library=blip.SL.library, dopt.SL.library = NULL,
                         metalearner = metalearner, risk.type=risk.type, grid.size=grid.size, VFolds=VFolds, QAW.fun = NULL,
                         kappa = kappa, family = family, ab = ab, rule.output = "rc", cs_to_try = NULL, alphas_to_try = NULL)
    g.reg.train = SL.odtr.train$g.reg
    QAW.reg.train = SL.odtr.train$QAW.reg
    rc.out.test = SL.odtr.train$rc.out        # RC rule evaluated on fold i (via newV)
    Prd.is.1.test = rc.out.test$Prd.is.1      # probability the rule treats each subject
    Prd.is.0.test = 1 - rc.out.test$Prd.is.1
    Prd.is.A.test = ifelse(A[folds == i] == 1, Prd.is.1.test, Prd.is.0.test)  # at observed A
    g1W.test = predict(g.reg.train, newdata = W[folds == i,,drop=F], type = "response")$pred
    gAW.test = ifelse(A[folds == i] == 1, g1W.test, 1 - g1W.test)
    # Outcome regression predictions on fold i at A = 1, A = 0, and observed A
    Q1W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 1), type = "response")$pred
    Q0W.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = 0), type = "response")$pred
    QAW.test = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = A[folds == i]), type = "response")$pred
    # Fold-specific TMLE for the mean under the RC rule (passes tauP and kappa through)
    tmle_objects.EnYgRC.test = tmle.rc.fun(A = A[folds == i], Y = Y[folds==i], gstarAW = Prd.is.A.test, gstar1W = Prd.is.1.test, gstar0W = Prd.is.0.test, QAW = QAW.test, Q1W = Q1W.test, Q0W = Q0W.test, gAW = gAW.test, ab = ab, tauP = rc.out.test$tauP, kappa = kappa)
    Psi_EnYgRC.test = tmle_objects.EnYgRC.test$psi
    varIC_EnYgRC.test = var(tmle_objects.EnYgRC.test$IC)
    toreturn = list(Psi_EnYgRC.test = c(EYgRC = Psi_EnYgRC.test), varIC_EnYgRC.test = c(EYgRC = varIC_EnYgRC.test))
    if (!is.null(QAW.fun)) {
      # True mean on fold i under the fold-specific estimated RC rule
      E0YgRC.test = mean(unlist(QAW.fun(A = 1, W = W[folds == i,,drop=F])*Prd.is.1.test) + unlist(QAW.fun(A = 0, W = W[folds == i,,drop=F])*Prd.is.0.test))
      toreturn = list(Psi_EnYgRC.test = c(EYgRC = Psi_EnYgRC.test), varIC_EnYgRC.test = c(EYgRC = varIC_EnYgRC.test),
                      E0YgRC.test = c(EYgRC = E0YgRC.test))
    }
    if (!is.null(contrast)) {
      # For each (deterministic) contrast rule, psi and IC differences vs. its TMLE
      contrast_fun = function(contrast_i) {
        contrast.test_i = contrast_i[folds == i]
        Qcontrast.test_i = predict(QAW.reg.train, newdata = data.frame(W[folds == i,,drop=F], A = contrast.test_i), type = "response")$pred
        tmle_objects.contrast.test_i = tmle.d.fun(A = A[folds == i], Y = Y[folds==i], d = contrast.test_i, Qd = Qcontrast.test_i, gAW = gAW.test, ab = ab)
        Psi_EnYgRC.test_i = tmle_objects.EnYgRC.test$psi - tmle_objects.contrast.test_i$psi
        # Variance of the IC of the DIFFERENCE (accounts for correlation of the two ICs)
        varIC_EnYgRC.test_i = var(tmle_objects.EnYgRC.test$IC - tmle_objects.contrast.test_i$IC)
        toreturn_contrast = c(Psi_EnYgRC.test_i = Psi_EnYgRC.test_i, varIC_EnYgRC.test_i = varIC_EnYgRC.test_i)
        if (!is.null(QAW.fun)) {
          E0YgRC.test_i = mean(unlist(QAW.fun(A = 1, W = W[folds == i,,drop=F])*Prd.is.1.test) + unlist(QAW.fun(A = 0, W = W[folds == i,,drop=F])*Prd.is.0.test)) - mean(unlist(QAW.fun(A = contrast.test_i, W = W[folds == i,,drop=F])))
          toreturn_contrast = c(Psi_EnYgRC.test = Psi_EnYgRC.test_i, varIC_EnYgRC.test = varIC_EnYgRC.test_i,
                                E0YgRC.test = E0YgRC.test_i)
        }
        return(toreturn_contrast)
      }
      contrast_df = apply(contrast, 2, contrast_fun)
      # Append one contrasted value per element of `toreturn`, preserving names
      toreturn_contrast = lapply(1:length(toreturn), function(x) c(toreturn[[x]], contrast_df[x,]))
      toreturn_contrast = lapply(toreturn_contrast, function(x) setNames(x, c("EYgRC", colnames(contrast_df))))
      names(toreturn_contrast) = names(toreturn)
      toreturn = toreturn_contrast
    }
    print(paste("CV TMLE finished fold", i, "of", VFolds))
    return(toreturn)
  }
  CV.TMLE.est = lapply(1:VFolds, CV.TMLE_fun)
  #EnYgRC, CVTMLE: pool fold-specific TMLEs (average psi; average IC variance / n)
  Psi_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$Psi_EnYgRC.test)))
  var_CV.TMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$varIC_EnYgRC.test)))/n
  # 95% Wald-style confidence interval
  CI_CV.TMLE = sapply(1:length(Psi_CV.TMLE), function(i) Psi_CV.TMLE[i] + c(-1,1)*qnorm(0.975)*sqrt(var_CV.TMLE[i]))
  rownames(CI_CV.TMLE) = c("CI_CV.TMLE1", "CI_CV.TMLE2")
  colnames(CI_CV.TMLE) = names(Psi_CV.TMLE)
  EnYgRC.CVTMLE = rbind(Psi_CV.TMLE = Psi_CV.TMLE, CI_CV.TMLE = CI_CV.TMLE)
  if (!is.null(QAW.fun)) {
    #E0YgRC, CVTMLE: average of fold-specific true means under the estimated rules
    E0YgRC.CVTMLE = colMeans(do.call('rbind', lapply(1:VFolds, function(i) CV.TMLE.est[[i]]$E0YgRC.test)))
    # regret: true mean under the estimated RC rule minus true mean under the
    # true RC rule (derived from the true blip at the same constraint kappa)
    true_rc.out = dopt.fun(blip = unlist(QAW.fun(1,W) - QAW.fun(0,W)), kappa = kappa)
    regret = mean(unlist(QAW.fun(A = 1, W)*SL.odtr$rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W)*(1 - SL.odtr$rc.out$Prd.is.1))) - mean(unlist(QAW.fun(A = 1, W)*true_rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W)*(1 - true_rc.out$Prd.is.1)))
    # True mean under estimated optimal rule using true QAW
    EYdn_QAWHat = mean(unlist(QAW.fun(A = 1, W = W)*rc.out$Prd.is.1) + unlist(QAW.fun(A = 0, W = W)*(1-rc.out$Prd.is.1)))
    toreturn = list(EnYgRC = rbind(EnYgRC.nonCVTMLE, EnYgRC.CVTMLE),
                    E0YgRC = rbind(E0YgRC.nonCVTMLE, E0YgRC.CVTMLE),
                    SL.info = data.frame(EYdn_QAWHat = EYdn_QAWHat,
                                         true_mean_Prd.is.1 = mean(true_rc.out$Prd.is.1),
                                         est_mean_Prd.is.1 = mean(SL.odtr$rc.out$Prd.is.1),
                                         regret = regret,
                                         true_tauP = true_rc.out$tauP,
                                         est_tauP = SL.odtr$rc.out$tauP,
                                         coef = t(SL.odtr$SL.fit$coef)))
  } else {
    toreturn = list(EYdopt_estimates = rbind(EnYgRC.nonCVTMLE, EnYgRC.CVTMLE),
                    SL.odtr = SL.odtr)
  }
  return(toreturn)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsaddle.R
\name{rsaddle}
\alias{rsaddle}
\title{Simulate random variables from the Extended Empirical Saddlepoint density (ESS)}
\usage{
rsaddle(
n,
X,
decay,
ml = 2,
multicore = !is.null(cluster),
cluster = NULL,
ncores = detectCores() - 1,
...
)
}
\arguments{
\item{n}{number of simulated vectors.}
\item{X}{an m by d matrix containing the data.}
\item{decay}{rate at which the ESS falls back on a normal density. Should be a positive number. See Fasiolo et al. (2016)
for details.}
\item{ml}{n random variables are generated from a Gaussian importance density with covariance matrix
\code{ml*cov(X)}. By default the inflation factor is \code{ml=2}.}
\item{multicore}{if TRUE the ESS densities corresponding to the samples will be evaluated in parallel.}
\item{cluster}{an object of class \code{c("SOCKcluster", "cluster")}. This allows the user to pass her own cluster,
which will be used if \code{multicore == TRUE}. The user has to remember to stop the cluster.}
\item{ncores}{number of cores to be used.}
\item{...}{additional arguments to be passed to \code{dsaddle}.}
}
\value{
An n by d matrix containing the simulated vectors.
}
\description{
Simulate random variables from the Extended Empirical Saddlepoint density (ESS), using importance
sampling and then resampling according to the importance weights.
}
\details{
Notice that, while importance sampling is used, the output is a matrix of unweighted samples, obtained by resampling
with probabilities proportional to the importance weights.
}
\examples{
# Simulate bivariate data, where each marginal distribution is Exp(2)
X <- matrix(rexp(2 * 1e3), 1e3, 2)
# Simulate bivariate data from a saddlepoint fitted to X
Z <- rsaddle(1000, X, decay = 0.5)
# Look at first marginal distribution
hist( Z[ , 1] )
}
\references{
Fasiolo, M., Wood, S. N., Hartig, F. and Bravington, M. V. (2016).
An Extended Empirical Saddlepoint Approximation for Intractable Likelihoods. ArXiv http://arxiv.org/abs/1601.01849.
}
\author{
Matteo Fasiolo <matteo.fasiolo@gmail.com>.
}
|
/fuzzedpackages/esaddle/man/rsaddle.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 2,161
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsaddle.R
\name{rsaddle}
\alias{rsaddle}
\title{Simulate random variables from the Extended Empirical Saddlepoint density (ESS)}
\usage{
rsaddle(
n,
X,
decay,
ml = 2,
multicore = !is.null(cluster),
cluster = NULL,
ncores = detectCores() - 1,
...
)
}
\arguments{
\item{n}{number of simulated vectors.}
\item{X}{an m by d matrix containing the data.}
\item{decay}{rate at which the ESS falls back on a normal density. Should be a positive number. See Fasiolo et al. (2016)
for details.}
\item{ml}{n random variables are generated from a Gaussian importance density with covariance matrix
\code{ml*cov(X)}. By default the inflation factor is \code{ml=2}.}
\item{multicore}{if TRUE the ESS densities corresponding to the samples will be evaluated in parallel.}
\item{cluster}{an object of class \code{c("SOCKcluster", "cluster")}. This allows the user to pass her own cluster,
which will be used if \code{multicore == TRUE}. The user has to remember to stop the cluster.}
\item{ncores}{number of cores to be used.}
\item{...}{additional arguments to be passed to \code{dsaddle}.}
}
\value{
An n by d matrix containing the simulated vectors.
}
\description{
Simulate random variables from the Extended Empirical Saddlepoint density (ESS), using importance
sampling and then resampling according to the importance weights.
}
\details{
Notice that, while importance sampling is used, the output is a matrix of unweighted samples, obtained by resampling
with probabilities proportional to the importance weights.
}
\examples{
# Simulate bivariate data, where each marginal distribution is Exp(2)
X <- matrix(rexp(2 * 1e3), 1e3, 2)
# Simulate bivariate data from a saddlepoint fitted to X
Z <- rsaddle(1000, X, decay = 0.5)
# Look at first marginal distribution
hist( Z[ , 1] )
}
\references{
Fasiolo, M., Wood, S. N., Hartig, F. and Bravington, M. V. (2016).
An Extended Empirical Saddlepoint Approximation for Intractable Likelihoods. ArXiv http://arxiv.org/abs/1601.01849.
}
\author{
Matteo Fasiolo <matteo.fasiolo@gmail.com>.
}
|
# ======================================================================================================================
# ==== ABT MSE Diagnostics =============================================================================================
# ======================================================================================================================
# ABT MSE performance metrics. Each metric takes an MSE object and a population
# index `pp` and returns an nMPs x nsim matrix (one value per MP combination and
# simulation). All metrics carry class "ABT_PM" so they can be identified as
# performance measures; the shared helper below is deliberately NOT tagged.

# Mean catch over projection years 1-10 / 11-20 / 21-30.
Y10 <- function(MSE, pp = 1) apply(MSE@C[, , pp, MSE@nyears + 1:10], 1:2, mean)
class(Y10) <- "ABT_PM"
Y20 <- function(MSE, pp = 1) apply(MSE@C[, , pp, MSE@nyears + 11:20], 1:2, mean)
class(Y20) <- "ABT_PM"
Y30 <- function(MSE, pp = 1) apply(MSE@C[, , pp, MSE@nyears + 21:30], 1:2, mean)
class(Y30) <- "ABT_PM"

# Percentage of projection years in the Kobe green quadrant (F < FMSY and B > BMSY).
PGK <- function(MSE, pp = 1) apply(MSE@F_FMSY[, , pp, MSE@nyears + 1:MSE@proyears] < 1 &
                                     MSE@B_BMSY[, , pp, MSE@nyears + 1:MSE@proyears] > 1,
                                   1:2, sum) / MSE@proyears * 100
class(PGK) <- "ABT_PM"
# Percentage of projection years with overfishing (F > FMSY).
POF <- function(MSE, pp = 1) apply(MSE@F_FMSY[, , pp, MSE@nyears + 1:MSE@proyears] > 1, 1:2, sum) / MSE@proyears * 100
class(POF) <- "ABT_PM"
# Percentage of projection years in an overfished state (B < BMSY).
POFed <- function(MSE, pp = 1) apply(MSE@B_BMSY[, , pp, MSE@nyears + 1:MSE@proyears] < 1, 1:2, sum) / MSE@proyears * 100
class(POFed) <- "ABT_PM"

# Shared helper: SSB depletion (projected SSB / unfished projected SSB) over the
# projection years `yrs`, as an nMPs x nsim x length(yrs) array. Replaces the
# identical computation previously duplicated in D10/D20/D30/LD.
depletion_array <- function(MSE, pp, yrs) {
  SSB0 <- array(rep(MSE@SSB0proj[, pp, yrs], each = MSE@nMPs),
                c(MSE@nMPs, MSE@nsim, length(yrs)))
  MSE@SSB[, , pp, MSE@nyears + yrs] / SSB0
}

# Mean depletion over projection years 1-10 / 11-20 / 21-30.
D10 <- function(MSE, pp = 1) apply(depletion_array(MSE, pp, 1:10), 1:2, mean)
class(D10) <- "ABT_PM"
D20 <- function(MSE, pp = 1) apply(depletion_array(MSE, pp, 11:20), 1:2, mean)
class(D20) <- "ABT_PM"
D30 <- function(MSE, pp = 1) apply(depletion_array(MSE, pp, 21:30), 1:2, mean)
class(D30) <- "ABT_PM"
# Lowest depletion observed over the 30 projection years.
LD <- function(MSE, pp = 1) apply(depletion_array(MSE, pp, 1:30), 1:2, min)
class(LD) <- "ABT_PM"

# SSB in projection year 30 relative to the MP in row 1 (presumably a reference
# MP -- confirm ordering of MSE@MPs).
RSSB <- function(MSE, pp = 1) MSE@SSB[, , pp, MSE@nyears + 30] /
  array(rep(MSE@SSB[1, , pp, MSE@nyears + 30], each = MSE@nMPs), dim = c(MSE@nMPs, MSE@nsim))
class(RSSB) <- "ABT_PM"
# Lowest SSB relative to the row-1 MP over the 30 projection years.
LRSSB <- function(MSE, pp = 1) apply(MSE@SSB[, , pp, MSE@nyears + 1:30] /
                                       array(rep(MSE@SSB[1, , pp, MSE@nyears + 1:30], each = MSE@nMPs),
                                             dim = c(MSE@nMPs, MSE@nsim, 30)),
                                     1:2, min)
class(LRSSB) <- "ABT_PM"

# Average annual absolute proportional variation in yield over the 30 projection
# years: mean of |C_t - C_{t-1}| / C_{t-1}. (abs() replaces the original
# ((x)^2)^0.5, which is the same quantity.)
AAVY <- function(MSE, pp = 1) {
  ind1 <- MSE@nyears + 0:29  # previous year
  ind <- MSE@nyears + 1:30   # current year
  apply(abs(MSE@C[, , pp, ind] - MSE@C[, , pp, ind1]) / MSE@C[, , pp, ind1], 1:2, mean)
}
class(AAVY) <- "ABT_PM"
# Summarize MSE performance: returns one data.frame per population (named by
# MSE@Snames), with rows = MP combinations and columns = the ABT_PM metrics,
# each averaged over simulations. Yields (Y10/Y20/Y30) are reported in '000s.
#
# object: an MSE object.
# bysim:  kept for backward compatibility; currently unused.
#
# Changes vs. original: removed unused locals (nsim, proyears, nMPs) and
# factored the repeated apply(..., 1, mean) into a local helper.
getperf <- function(object, bysim = F) {
  MSE <- object
  # MP names come in pairs (one per population); label rows as "MP1-MP2"
  MPnams <- unlist(MSE@MPs)
  MPnams <- paste(MPnams[(1:MSE@nMPs) * 2 - 1], MPnams[(1:MSE@nMPs) * 2], sep = "-")
  rowmean <- function(x) apply(x, 1, mean)  # average each MP over simulations
  out <- vector("list", MSE@npop)
  for (pp in 1:MSE@npop) {
    out[[pp]] <- data.frame(
      "Y10" = rowmean(Y10(MSE, pp = pp) / 1000),
      "Y20" = rowmean(Y20(MSE, pp = pp) / 1000),
      "Y30" = rowmean(Y30(MSE, pp = pp) / 1000),
      "PGK" = rowmean(PGK(MSE, pp = pp)),
      "POF" = rowmean(POF(MSE, pp)),
      "POFed" = rowmean(POFed(MSE, pp)),
      "D10" = rowmean(D10(MSE, pp)),
      "D20" = rowmean(D20(MSE, pp)),
      "D30" = rowmean(D30(MSE, pp)),
      "LD" = rowmean(LD(MSE, pp)),
      "RSSB" = rowmean(RSSB(MSE, pp)),
      "LRSSB" = rowmean(LRSSB(MSE, pp)),
      "AAVY" = rowmean(AAVY(MSE, pp)),
      row.names = MPnams)
  }
  names(out) <- MSE@Snames
  out
}
# sumplot: kernel-density comparison of three performance metrics, with one
# curve per unique level of the grouping column `field`.
#   dat  - data frame of results; columns 13, 11 and 10 are read by position
#          and paired with the titles in mnam (yield, P(green Kobe), AAVY)
#   adjv - density() bandwidth adjustment, one value per metric
#   pm   - multiplier on the tail-trim percentile for the third metric
#   UB   - values >= UB (or <= -0.001) are discarded before smoothing
sumplot<-function(dat,field,adjv=c(1,1,1),pm=2,UB=10){
perc=c(0.02,0.02,0.02)
perc[3]<-perc[3]*pm
col<-c("black","red","green","blue","orange","grey","purple","pink","brown")
coln<-match(field,names(dat))
levs<-unique(dat[,coln])
mnam<-c("Yield (% Disc. Rate)","Prob. Green Kobe","Av Ann. Var. Yield")
mind<-c(13,11,10)
for(met in 1:length(mnam)){
# First pass over levels: find shared y maximum and trimmed x limits so all
# curves in the panel use the same axes
ymax<--1000
xlim<-c(10000,-10000)
for(i in 1:length(levs)) {
tdat<-dat[dat[,coln]==levs[i],mind[met]]
tdat<-tdat[tdat<UB&tdat>-0.001]
dd<-density(tdat,adj=adjv[met],from=0)
xt<-quantile(tdat,c(perc[met]/2,1-perc[met]))
xlim[1]<-min(xlim[1],xt[1])
xlim[2]<-max(xlim[2],xt[2])
ymax<-max(ymax, max(dd$y))
}
# Second pass: draw the curves (first level opens the plot, rest overlay)
for(i in 1:length(levs)){
tdat<-as.numeric(dat[dat[,coln]==levs[i],mind[met]])
tdat<-tdat[tdat<UB&tdat>-0.001]
if(i==1)plot(density(tdat,adj=adjv[met],from=0),ylim=c(0,ymax),xlim=xlim,col=col[1],type='l',main=mnam[met])
if(i>1)lines(density(tdat,adj=adjv[met],from=0),col=col[i])
}
}
# NOTE(review): the legend is drawn once after the metric loop, so it only
# appears on the last panel; sumplot2 draws it inside the loop -- confirm
# which behaviour is intended.
legend('topright',legend=levs,text.col=col[1:length(levs)],bty='n')
}
# sumplot2: as sumplot, but loops over several grouping fields (one row of
# panels per field), excludes a reference MP from the data, and adds a shared
# y-axis label in the outer margin.
#   refMP - rows of dat with dat$MP equal to this value are dropped first
sumplot2<-function(dat,fieldv,adjv=c(1,1,1),pm=2,UB=10,refMP="UMSY_PI"){
dat<-dat[dat$MP!=refMP,]
perc=c(0.02,0.02,0.02)
perc[3]<-perc[3]*pm
col<-c("black","red","green","blue","orange","grey","purple","pink","brown")
for(ff in 1:length(fieldv)){
field<-fieldv[ff]
coln<-match(field,names(dat))
levs<-unique(dat[,coln])
# Columns 13, 10, 11 are read by position and paired with these titles
mnam<-c("Yield (5% Disc. Rate)","Av Ann. Var. Yield","Prob. Green Kobe")
mind<-c(13,10,11)
for(met in 1:length(mnam)){
# First pass: shared y maximum and trimmed x limits across levels
ymax<--1000
xlim<-c(10000,-10000)
for(i in 1:length(levs)) {
tdat<-dat[dat[,coln]==levs[i],mind[met]]
tdat<-tdat[tdat<UB&tdat>-0.001]
dd<-density(tdat,adj=adjv[met],from=0)
xt<-quantile(tdat,c(perc[met]/2,1-perc[met]))
xlim[1]<-min(xlim[1],xt[1])
xlim[2]<-max(xlim[2],xt[2])
ymax<-max(ymax, max(dd$y))
}
# Second pass: draw the density curves
for(i in 1:length(levs)){
tdat<-as.numeric(dat[dat[,coln]==levs[i],mind[met]])
tdat<-tdat[tdat<UB&tdat>-0.001]
if(i==1)plot(density(tdat,adj=adjv[met],from=0),ylim=c(0,ymax),xlim=xlim,xlab="",ylab="",col=col[1],type='l',main="")
if(i>1)lines(density(tdat,adj=adjv[met],from=0),col=col[i])
}
# Metric titles only above the first row of panels
if(ff==1)mtext(mnam[met],side=3,line=0.3)
}
legend('topright',legend=levs,text.col=col[1:length(levs)],bty='n')
}
mtext("Relative frequency",side=2,line=0.5,outer=T)
}
# Tplot: 4x3 panel trade-off plot for an MSE object.  The first rows show
# per-population metric trade-offs; the final panels plot population 1
# against population 2 for each metric.
# NOTE(review): the second half hard-codes exactly two populations (pp=1 and
# pp=2) -- confirm MSE@npop==2 holds for all callers.
Tplot<-function(MSE){
# MP names come in pairs; join consecutive names as "first-second"
MPnams<-matrix(unlist(MSE@MPs),nrow=2)
MPnamsj<-paste(MPnams[(1:MSE@nMPs)*2-1],MPnams[(1:MSE@nMPs)*2],sep="-")
par(mfrow=c(4,3),mai=c(0.5,0.5,0.25,0.05),omi=rep(0.05,4))
# Per-population panels: simulation-mean metric values per MP
for(pp in 1:MSE@npop){
Y30a<-apply(Y30(MSE,pp)/1000,1,mean)
PGKa<-apply(PGK(MSE,pp),1,mean)
POFa<-apply(POF(MSE,pp),1,mean)
POFeda<-apply(POFed(MSE,pp),1,mean)
D30a<-apply(D30(MSE,pp),1,mean)
AAVYa<-apply(AAVY(MSE,pp),1,mean)
TOplt(PGKa,Y30a,MPnams[pp,],"P(Green Kobe)","Long Term Yield",0.5,NA)
TOplt(POFa,POFeda,MPnams[pp,],"P(F>FMSY)","P(B<BMSY)",0.5,0.5)
mtext(MSE@Snames[pp],3,line=0.3,font=2)
TOplt(D30a,AAVYa,MPnams[pp,],"Final depletion","AAVY",0.1,0.2)
}
# Population-vs-population panels: metric for pop 1 on x, pop 2 on y
Y30_1<-apply(Y30(MSE,1)/1000,1,mean)
PGK_1<-apply(PGK(MSE,1),1,mean)
POF_1<-apply(POF(MSE,1),1,mean)
POFed_1<-apply(POFed(MSE,1),1,mean)
D30_1<-apply(D30(MSE,1),1,mean)
AAVY_1<-apply(AAVY(MSE,1),1,mean)
Y30_2<-apply(Y30(MSE,pp=2)/1000,1,mean)
PGK_2<-apply(PGK(MSE,pp=2),1,mean)
POF_2<-apply(POF(MSE,2),1,mean)
POFed_2<-apply(POFed(MSE,2),1,mean)
D30_2<-apply(D30(MSE,2),1,mean)
AAVY_2<-apply(AAVY(MSE,2),1,mean)
TOplt(Y30_1,Y30_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Long term Yield")
TOplt(PGK_1,PGK_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(Green Kobe)")
TOplt(POF_1,POF_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(F>FMSY)")
TOplt(POFed_1,POFed_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(B<BMSY)")
TOplt(D30_1,D30_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Final depletion")
TOplt(AAVY_1,AAVY_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Av. Ann. Var. Yld.")
}
TOplt<-function(x,y,MPs,xlab,ylab,xref=NA,yref=NA,main=""){
  # Trade-off scatter panel: MP names drawn as coloured text at (x, y),
  # with optional dashed reference lines and margin titles via mtext.
  label_cols <- rep(c("black", "blue", "orange", "green", "red", "grey"), 20)
  pad_x <- (max(x) - min(x)) / 15 * c(-1, 1)   # ~7% padding each side
  pad_y <- (max(y) - min(y)) / 15 * c(-1, 1)
  # Open an empty plotting region (points drawn white, labels added below)
  plot(x, y, col = 'white', xlab = "", ylab = "",
       xlim = range(x) + pad_x,
       ylim = range(y) + pad_y)
  abline(h = yref, lty = 2, col = "grey")  # horizontal reference (if given)
  abline(v = xref, lty = 2, col = "grey")  # vertical reference (if given)
  text(x, y, MPs, col = label_cols)
  mtext(xlab, 1, line = 2, cex = 0.85)
  mtext(ylab, 2, line = 2, cex = 0.85)
  mtext(main, 3, line = 0.4, cex = 0.85, font = 2)
}
# Tplot2: draw one Tplot per requested field, then add shared axis labels in
# the outer margins.
# NOTE(review): Tplot() as defined above takes a single MSE argument, yet it
# is called here with (dat, fieldv[ll], legpos) -- confirm which Tplot
# signature this wrapper was written against.
Tplot2<-function(dat,fieldv,legpos='top'){
for(ll in 1:length(fieldv))Tplot(dat,fieldv[ll],legpos)
mtext("Yield relative to MSY (5% Disc. rate)",side=2,line=0.5,outer=T)
mtext(c("Prob. green Kobe","AAVY"),side=1,at=c(0.25,0.75),line=0.8,outer=T)
}
addgg<-function(x,y,pcol='azure2'){
  # Draw a ggplot-style background on the current plot: a shaded panel
  # spanning the data range (padded by 20% per side) with white gridlines
  # at pretty intervals.
  step_x <- (max(x) - min(x)) / 10
  step_y <- (max(y) - min(y)) / 10
  panel_x <- c(min(x) - 2 * step_x, max(x) + 2 * step_x)
  panel_y <- c(min(y) - 2 * step_y, max(y) + 2 * step_y)
  grid_x <- pretty(seq(panel_x[1], panel_x[2], length.out = 20))
  grid_y <- pretty(seq(panel_y[1], panel_y[2], length.out = 20))
  polygon(c(panel_x, panel_x[2:1]), rep(panel_y, each = 2), col = pcol)
  abline(v = grid_x, col = 'white')
  abline(h = grid_y, col = 'white')
}
makeTrans<-function(someColor, alpha=100){
  # Convert colour name(s)/hex code(s) to semi-transparent hex strings.
  # alpha is on the 0-255 scale (default 100); one output per input colour.
  rgb_mat <- col2rgb(someColor)
  apply(rgb_mat, 2, function(chan) {
    rgb(red = chan[1], green = chan[2], blue = chan[3],
        alpha = alpha, maxColorValue = 255)
  })
}
# Plot performance summary of the mse object
#setMethod("summary",
#          signature(object = "MSE"),
# stats: summary statistics for the populations listed in object@targpop.
# Catch is summed across target populations; returns a list of matrices:
# late-period yield (Y), mean absolute annual catch change (AAVY),
# proportion of projection years in the green Kobe quadrant (Pgreen), plus
# the underlying C, F_FMSY and B_BMSY arrays.
stats<-function(object){
nsim<-object@nsim
nyears<-object@nyears
proyears<-object@proyears
nMPs<-object@nMPs
targpop<-object@targpop
# Total projection-period catch summed over target populations
C<-apply(array(object@C[,,targpop,(nyears+1):(nyears+proyears)],
c(nMPs,nsim,length(targpop),proyears)),c(1,2,4),sum)
# Absolute year-on-year catch change via matrix (linear) indexing of
# consecutive year pairs; ((x)^2)^0.5 is |x|
AAVY<-array(NA,c(nMPs,nsim,proyears-1))
ind1<-as.matrix(expand.grid(1:nMPs,1:nsim,1:(proyears-1)))
ind2<-as.matrix(expand.grid(1:nMPs,1:nsim,2:proyears))
AAVY[ind1]<-((C[ind1]-C[ind2])^2)^0.5
AAVY<-apply(AAVY,1:2,mean)
# Mean catch over the final five projection years
Y<-apply(C[,,(proyears-4):proyears],1:2,mean)
# NOTE(review): F_FMSY/B_BMSY are indexed with three subscripts here, while
# the ABT_PM metric functions index the same slots with four (MP, sim, pop,
# year) -- confirm the slot dimensionality this function expects.
F_FMSY<-object@F_FMSY[,,(nyears+1):(nyears+proyears)]
B_BMSY<-object@B_BMSY[,,(nyears+1):(nyears+proyears)]
Pgreen<-apply(array(as.integer(F_FMSY<1&B_BMSY>1),dim(B_BMSY)),1:2,mean)
list("Y"=Y,"AAVY"=AAVY,"Pgreen"=Pgreen,"Dep"=B_BMSY[,,proyears],"C"=C,"F_FMSY"=F_FMSY,"B_BMSY"=B_BMSY)
}#)
# anim8mov: render an animation of per-region statistics on a map of the BC
# coast (herring management regions), one frame per row of `stat`.
# NOTE(review): this function appears to be mid-refactor (see the commented
# "animov<-function(stat)" line): the OM and outfile arguments are never
# used, and `stat` is referenced before being defined in this scope -- the
# function cannot run as written.  It also depends on non-base helpers
# (tinter, importShapefile, tompoly, map, ani.pause), a hard-coded D: drive
# path, and a shapefile on disk.
anim8mov<-function(OM,outfile='bftanimov'){
#animov<-function(stat){
fac<-4
stat<-tinter(t(stat),fac)
UB<-1
LB<-0
nsim<-nrow(stat)
# NOTE(review): setwd() with an absolute machine-specific path -- confirm
Drive<-"D"
setwd(paste(Drive,':/HerringMSE/Data/',sep=""))
Lat<-c(48,55)
Lon<-c(-133,-122)
gridcol<-'grey'
axiscol<-'black'
landcol<-'grey'
Regions<-c("Straight of Georgia","W. Coast Van. Island","Central Coast", "Haida Gwaii", "Prince Rupert District")
RegCodes<-c("SOG","WCVI","CC","HG","PRD")
# Load management-area polygons and renumber PIDs into RegCodes order
MAs<-importShapefile('Assessment_Regions_2W_27_March_2009',readDBF=F)
MAnam<-c("PRD","CC","SOG","WCVI","A27","2W","HG")
MAs<-subset(MAs,MAs$PID%in%match(RegCodes,MAnam))
MAs$PID<-match(MAnam[MAs$PID],RegCodes)
dcolgrad<-500
dcols<-rainbow(dcolgrad+2,start=0.05,end=0.4)
# NOTE(review): this statcol and the bare paste() below are dead code --
# statcol is immediately overwritten by the sprintf() version
statcol<-1+ceiling((stat^0.5-LB^0.5)/(UB^0.5-LB^0.5)*dcolgrad)
paste("#ff0000",floor(stat*0.99*100),sep="")
statcol<-array(sprintf("#ff0000%02d", floor(stat*0.99*100)),dim(stat))
# One frame per row of stat: ocean background, shaded regions, coastline
for(j in 1:nsim){
plot(Lon,Lat,col="white",xlab="",ylab="",main="",axes=F)
xlimz<-c(-1000,1000)
ylimz<-xlimz
polygon(rep(xlimz,each=2),c(ylimz,ylimz[2:1]),col='azure',border='azure')
#           x<-(-122:-135)                                                   # the global 1 deg longitudes
#y<-47:56                                                         # the global 1 deg latitudes
#abline(h=y,col=gridcol)                                                 # add the 1 degree latitude lines
#abline(v=x,col=gridcol)                                                 # add the 1 degree longitude lines
#axis(1,at=x,labels=as.character(x),col=gridcol,col.axis=axiscol,cex.axis=1)
#axis(2,at=y,labels=as.character(y),col=gridcol,col.axis=axiscol,cex.axis=1)
#mtext(expression(paste("Longitude ",~degree~W,sep="")),side=1,line=2.5,outer=F,font=2,cex=14/12)
#mtext(expression(paste("Latitude ",~degree~N,sep="")),side=2,line=2.5,outer=F,font=2,cex=14/12)
tompoly(MAs,acol=statcol[j,],lcol=statcol[j,])
map(database = "worldHires", xlim=Lon, ylim=Lat,resolution = 0,add=T,fill=T,col=landcol)
map(database = "worldHires", xlim=Lon, ylim=Lat,resolution = 0,add=T,col=landcol)
legend('topright',legend=ceiling(j/5),text.col='white',cex=1.8,bty='n')
ani.pause()
}
#}
}
|
/R_package/ABTMSE/inst/Diagnostics.R
|
no_license
|
pl202/abft-mse
|
R
| false
| false
| 12,399
|
r
|
# ======================================================================================================================
# ==== ABT MSE Diagnostics =============================================================================================
# ======================================================================================================================
# ---- Performance metric functions (class "ABT_PM") --------------------------
# Each metric takes an MSE results object and a population index pp and
# returns an nMPs x nsim matrix (one value per management procedure and
# simulation).  Projection-year indices are offsets from MSE@nyears.
# Y10 / Y20 / Y30: mean catch over projection years 1-10, 11-20 and 21-30
Y10<-function(MSE,pp=1) apply(MSE@C[,,pp,MSE@nyears+1:10],1:2,mean)
class(Y10)<-"ABT_PM"
Y20<-function(MSE,pp=1) apply(MSE@C[,,pp,MSE@nyears+11:20],1:2,mean)
class(Y20)<-"ABT_PM"
Y30<-function(MSE,pp=1) apply(MSE@C[,,pp,MSE@nyears+21:30],1:2,mean)
class(Y30)<-"ABT_PM"
# PGK: percentage of projection years spent in the "green" Kobe quadrant
# (F/FMSY < 1 and B/BMSY > 1)
PGK<-function(MSE,pp=1) apply(MSE@F_FMSY[,,pp,MSE@nyears+1:MSE@proyears]<1 & MSE@B_BMSY[,,pp,MSE@nyears+1:MSE@proyears]>1,1:2,sum)/MSE@proyears*100
class(PGK)<-"ABT_PM"
# POF: percentage of projection years with overfishing (F/FMSY > 1)
POF<-function(MSE,pp=1) apply(MSE@F_FMSY[,,pp,MSE@nyears+1:MSE@proyears]>1,1:2,sum)/MSE@proyears*100
class(POF)<-"ABT_PM"
# POFed: percentage of projection years in an overfished state (B/BMSY < 1)
POFed<-function(MSE,pp=1) apply(MSE@B_BMSY[,,pp,MSE@nyears+1:MSE@proyears]<1,1:2,sum)/MSE@proyears*100
class(POFed)<-"ABT_PM"
# D10 / D20 / D30: mean of SSB divided by the projected SSB0proj series over
# projection years 1-10, 11-20 and 21-30 (SSB0proj is replicated across MPs)
D10<-function(MSE,pp=1){
D<-MSE@SSB[,,pp,MSE@nyears+1:10]/array(rep(MSE@SSB0proj[,pp,1:10],each=MSE@nMPs),c(MSE@nMPs,MSE@nsim,10))
apply(D,1:2,mean)
}
class(D10)<-"ABT_PM"
D20<-function(MSE,pp=1){
D<-MSE@SSB[,,pp,MSE@nyears+11:20]/array(rep(MSE@SSB0proj[,pp,11:20],each=MSE@nMPs),c(MSE@nMPs,MSE@nsim,10))
apply(D,1:2,mean)
}
class(D20)<-"ABT_PM"
D30<-function(MSE,pp=1){
D<-MSE@SSB[,,pp,MSE@nyears+21:30]/array(rep(MSE@SSB0proj[,pp,21:30],each=MSE@nMPs),c(MSE@nMPs,MSE@nsim,10))
apply(D,1:2,mean)
}
class(D30)<-"ABT_PM"
# LD: the lowest SSB/SSB0proj ratio observed over projection years 1-30
LD<-function(MSE,pp=1){
D<-MSE@SSB[,,pp,MSE@nyears+1:30]/array(rep(MSE@SSB0proj[,pp,1:30],each=MSE@nMPs),c(MSE@nMPs,MSE@nsim,30))
apply(D,1:2,min)
}
class(LD)<-"ABT_PM"
# RSSB: SSB in projection year 30 relative to the first MP (row 1 of the
# MP dimension, replicated across MPs)
RSSB<-function(MSE,pp=1) MSE@SSB[,,pp,MSE@nyears+30]/array(rep(MSE@SSB[1,,pp,MSE@nyears+30],each=MSE@nMPs),dim=c(MSE@nMPs,MSE@nsim))
class(RSSB)<-"ABT_PM"
# LRSSB: the minimum over projection years 1-30 of SSB relative to the
# first MP
LRSSB<-function(MSE,pp=1) apply(MSE@SSB[,,pp,MSE@nyears+1:30]/array(rep(MSE@SSB[1,,pp,MSE@nyears+1:30],each=MSE@nMPs),dim=c(MSE@nMPs,MSE@nsim,30)),1:2,min)
class(LRSSB)<-"ABT_PM"
# AAVY: average annual variation in yield for population pp -- the mean, over
# the 30 projection years, of the absolute year-on-year change in catch
# expressed as a fraction of the previous year's catch.
# Returns an nMPs x nsim matrix.
AAVY<-function(MSE,pp=1){
  ind1<-MSE@nyears+0:29  # "previous year" indices
  ind<-MSE@nyears+1:30   # "current year" indices
  # abs() replaces the original ((x)^2)^0.5, which computes the same
  # quantity less directly (and with two extra vectorised passes)
  apply(abs(MSE@C[,,pp,ind]-MSE@C[,,pp,ind1])/MSE@C[,,pp,ind1],1:2,mean)
}
class(AAVY)<-"ABT_PM"
# getperf: tabulate all ABT_PM performance metrics for an MSE object.
# Returns a list with one data frame per population (named by MSE@Snames);
# each data frame has one row per MP (rows named "MP1-MP2" pairs) and one
# column per metric, averaged over simulations.
# NOTE(review): the bysim argument is accepted but never used in this body --
# confirm whether per-simulation output was intended.
getperf<-function(object,bysim=F){
MSE<-object
nsim<-MSE@nsim
proyears<-MSE@proyears
nMPs<-MSE@nMPs
# MPs are stored as pairs; join consecutive names as "first-second"
MPnams<-unlist(MSE@MPs)
MPnams<-paste(MPnams[(1:MSE@nMPs)*2-1],MPnams[(1:MSE@nMPs)*2],sep="-")
out<-new('list')
for(pp in 1:MSE@npop){
# Yields are rescaled by 1/1000; other metrics used as returned
Y10a<-Y10(MSE,pp=pp)/1000
Y20a<-Y20(MSE,pp=pp)/1000
Y30a<-Y30(MSE,pp=pp)/1000
PGKa<-PGK(MSE,pp=pp)
POFa<-POF(MSE,pp)
POFeda<-POFed(MSE,pp)
D10a<-D10(MSE,pp)
D20a<-D20(MSE,pp)
D30a<-D30(MSE,pp)
LDa<-LD(MSE,pp)
RSSBa<-RSSB(MSE,pp)
LRSSBa<-LRSSB(MSE,pp)
AAVYa<-AAVY(MSE,pp)
# Collapse each nMPs x nsim metric matrix to a per-MP mean over simulations
out[[pp]]<-data.frame("Y10"=apply(Y10a,1,mean),
"Y20"=apply(Y20a,1,mean),
"Y30"=apply(Y30a,1,mean),
"PGK"=apply(PGKa,1,mean),
"POF"=apply(POFa,1,mean),
"POFed"=apply(POFeda,1,mean),
"D10"=apply(D10a,1,mean),
"D20"=apply(D20a,1,mean),
"D30"=apply(D30a,1,mean),
"LD"=apply(LDa,1,mean),
"RSSB"=apply(RSSBa,1,mean),
"LRSSB"=apply(LRSSBa,1,mean),
"AAVY"=apply(AAVYa,1,mean),row.names=MPnams)
}
names(out)<-MSE@Snames
out
}
# sumplot: kernel-density comparison of three performance metrics, with one
# curve per unique level of the grouping column `field`.
#   dat  - data frame of results; columns 13, 11 and 10 are read by position
#          and paired with the titles in mnam (yield, P(green Kobe), AAVY)
#   adjv - density() bandwidth adjustment, one value per metric
#   pm   - multiplier on the tail-trim percentile for the third metric
#   UB   - values >= UB (or <= -0.001) are discarded before smoothing
sumplot<-function(dat,field,adjv=c(1,1,1),pm=2,UB=10){
perc=c(0.02,0.02,0.02)
perc[3]<-perc[3]*pm
col<-c("black","red","green","blue","orange","grey","purple","pink","brown")
coln<-match(field,names(dat))
levs<-unique(dat[,coln])
mnam<-c("Yield (% Disc. Rate)","Prob. Green Kobe","Av Ann. Var. Yield")
mind<-c(13,11,10)
for(met in 1:length(mnam)){
# First pass over levels: find shared y maximum and trimmed x limits so all
# curves in the panel use the same axes
ymax<--1000
xlim<-c(10000,-10000)
for(i in 1:length(levs)) {
tdat<-dat[dat[,coln]==levs[i],mind[met]]
tdat<-tdat[tdat<UB&tdat>-0.001]
dd<-density(tdat,adj=adjv[met],from=0)
xt<-quantile(tdat,c(perc[met]/2,1-perc[met]))
xlim[1]<-min(xlim[1],xt[1])
xlim[2]<-max(xlim[2],xt[2])
ymax<-max(ymax, max(dd$y))
}
# Second pass: draw the curves (first level opens the plot, rest overlay)
for(i in 1:length(levs)){
tdat<-as.numeric(dat[dat[,coln]==levs[i],mind[met]])
tdat<-tdat[tdat<UB&tdat>-0.001]
if(i==1)plot(density(tdat,adj=adjv[met],from=0),ylim=c(0,ymax),xlim=xlim,col=col[1],type='l',main=mnam[met])
if(i>1)lines(density(tdat,adj=adjv[met],from=0),col=col[i])
}
}
# NOTE(review): the legend is drawn once after the metric loop, so it only
# appears on the last panel; sumplot2 draws it inside the loop -- confirm
# which behaviour is intended.
legend('topright',legend=levs,text.col=col[1:length(levs)],bty='n')
}
# sumplot2: as sumplot, but loops over several grouping fields (one row of
# panels per field), excludes a reference MP from the data, and adds a shared
# y-axis label in the outer margin.
#   refMP - rows of dat with dat$MP equal to this value are dropped first
sumplot2<-function(dat,fieldv,adjv=c(1,1,1),pm=2,UB=10,refMP="UMSY_PI"){
dat<-dat[dat$MP!=refMP,]
perc=c(0.02,0.02,0.02)
perc[3]<-perc[3]*pm
col<-c("black","red","green","blue","orange","grey","purple","pink","brown")
for(ff in 1:length(fieldv)){
field<-fieldv[ff]
coln<-match(field,names(dat))
levs<-unique(dat[,coln])
# Columns 13, 10, 11 are read by position and paired with these titles
mnam<-c("Yield (5% Disc. Rate)","Av Ann. Var. Yield","Prob. Green Kobe")
mind<-c(13,10,11)
for(met in 1:length(mnam)){
# First pass: shared y maximum and trimmed x limits across levels
ymax<--1000
xlim<-c(10000,-10000)
for(i in 1:length(levs)) {
tdat<-dat[dat[,coln]==levs[i],mind[met]]
tdat<-tdat[tdat<UB&tdat>-0.001]
dd<-density(tdat,adj=adjv[met],from=0)
xt<-quantile(tdat,c(perc[met]/2,1-perc[met]))
xlim[1]<-min(xlim[1],xt[1])
xlim[2]<-max(xlim[2],xt[2])
ymax<-max(ymax, max(dd$y))
}
# Second pass: draw the density curves
for(i in 1:length(levs)){
tdat<-as.numeric(dat[dat[,coln]==levs[i],mind[met]])
tdat<-tdat[tdat<UB&tdat>-0.001]
if(i==1)plot(density(tdat,adj=adjv[met],from=0),ylim=c(0,ymax),xlim=xlim,xlab="",ylab="",col=col[1],type='l',main="")
if(i>1)lines(density(tdat,adj=adjv[met],from=0),col=col[i])
}
# Metric titles only above the first row of panels
if(ff==1)mtext(mnam[met],side=3,line=0.3)
}
legend('topright',legend=levs,text.col=col[1:length(levs)],bty='n')
}
mtext("Relative frequency",side=2,line=0.5,outer=T)
}
# Tplot: 4x3 panel trade-off plot for an MSE object.  The first rows show
# per-population metric trade-offs; the final panels plot population 1
# against population 2 for each metric.
# NOTE(review): the second half hard-codes exactly two populations (pp=1 and
# pp=2) -- confirm MSE@npop==2 holds for all callers.
Tplot<-function(MSE){
# MP names come in pairs; join consecutive names as "first-second"
MPnams<-matrix(unlist(MSE@MPs),nrow=2)
MPnamsj<-paste(MPnams[(1:MSE@nMPs)*2-1],MPnams[(1:MSE@nMPs)*2],sep="-")
par(mfrow=c(4,3),mai=c(0.5,0.5,0.25,0.05),omi=rep(0.05,4))
# Per-population panels: simulation-mean metric values per MP
for(pp in 1:MSE@npop){
Y30a<-apply(Y30(MSE,pp)/1000,1,mean)
PGKa<-apply(PGK(MSE,pp),1,mean)
POFa<-apply(POF(MSE,pp),1,mean)
POFeda<-apply(POFed(MSE,pp),1,mean)
D30a<-apply(D30(MSE,pp),1,mean)
AAVYa<-apply(AAVY(MSE,pp),1,mean)
TOplt(PGKa,Y30a,MPnams[pp,],"P(Green Kobe)","Long Term Yield",0.5,NA)
TOplt(POFa,POFeda,MPnams[pp,],"P(F>FMSY)","P(B<BMSY)",0.5,0.5)
mtext(MSE@Snames[pp],3,line=0.3,font=2)
TOplt(D30a,AAVYa,MPnams[pp,],"Final depletion","AAVY",0.1,0.2)
}
# Population-vs-population panels: metric for pop 1 on x, pop 2 on y
Y30_1<-apply(Y30(MSE,1)/1000,1,mean)
PGK_1<-apply(PGK(MSE,1),1,mean)
POF_1<-apply(POF(MSE,1),1,mean)
POFed_1<-apply(POFed(MSE,1),1,mean)
D30_1<-apply(D30(MSE,1),1,mean)
AAVY_1<-apply(AAVY(MSE,1),1,mean)
Y30_2<-apply(Y30(MSE,pp=2)/1000,1,mean)
PGK_2<-apply(PGK(MSE,pp=2),1,mean)
POF_2<-apply(POF(MSE,2),1,mean)
POFed_2<-apply(POFed(MSE,2),1,mean)
D30_2<-apply(D30(MSE,2),1,mean)
AAVY_2<-apply(AAVY(MSE,2),1,mean)
TOplt(Y30_1,Y30_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Long term Yield")
TOplt(PGK_1,PGK_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(Green Kobe)")
TOplt(POF_1,POF_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(F>FMSY)")
TOplt(POFed_1,POFed_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"P(B<BMSY)")
TOplt(D30_1,D30_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Final depletion")
TOplt(AAVY_1,AAVY_2,MPnamsj,MSE@Snames[1],MSE@Snames[2],NA,NA,"Av. Ann. Var. Yld.")
}
TOplt<-function(x,y,MPs,xlab,ylab,xref=NA,yref=NA,main=""){
  # Trade-off scatter panel: MP names drawn as coloured text at (x, y),
  # with optional dashed reference lines and margin titles via mtext.
  label_cols <- rep(c("black", "blue", "orange", "green", "red", "grey"), 20)
  pad_x <- (max(x) - min(x)) / 15 * c(-1, 1)   # ~7% padding each side
  pad_y <- (max(y) - min(y)) / 15 * c(-1, 1)
  # Open an empty plotting region (points drawn white, labels added below)
  plot(x, y, col = 'white', xlab = "", ylab = "",
       xlim = range(x) + pad_x,
       ylim = range(y) + pad_y)
  abline(h = yref, lty = 2, col = "grey")  # horizontal reference (if given)
  abline(v = xref, lty = 2, col = "grey")  # vertical reference (if given)
  text(x, y, MPs, col = label_cols)
  mtext(xlab, 1, line = 2, cex = 0.85)
  mtext(ylab, 2, line = 2, cex = 0.85)
  mtext(main, 3, line = 0.4, cex = 0.85, font = 2)
}
# Tplot2: draw one Tplot per requested field, then add shared axis labels in
# the outer margins.
# NOTE(review): Tplot() as defined above takes a single MSE argument, yet it
# is called here with (dat, fieldv[ll], legpos) -- confirm which Tplot
# signature this wrapper was written against.
Tplot2<-function(dat,fieldv,legpos='top'){
for(ll in 1:length(fieldv))Tplot(dat,fieldv[ll],legpos)
mtext("Yield relative to MSY (5% Disc. rate)",side=2,line=0.5,outer=T)
mtext(c("Prob. green Kobe","AAVY"),side=1,at=c(0.25,0.75),line=0.8,outer=T)
}
addgg<-function(x,y,pcol='azure2'){
  # Draw a ggplot-style background on the current plot: a shaded panel
  # spanning the data range (padded by 20% per side) with white gridlines
  # at pretty intervals.
  step_x <- (max(x) - min(x)) / 10
  step_y <- (max(y) - min(y)) / 10
  panel_x <- c(min(x) - 2 * step_x, max(x) + 2 * step_x)
  panel_y <- c(min(y) - 2 * step_y, max(y) + 2 * step_y)
  grid_x <- pretty(seq(panel_x[1], panel_x[2], length.out = 20))
  grid_y <- pretty(seq(panel_y[1], panel_y[2], length.out = 20))
  polygon(c(panel_x, panel_x[2:1]), rep(panel_y, each = 2), col = pcol)
  abline(v = grid_x, col = 'white')
  abline(h = grid_y, col = 'white')
}
makeTrans<-function(someColor, alpha=100){
  # Convert colour name(s)/hex code(s) to semi-transparent hex strings.
  # alpha is on the 0-255 scale (default 100); one output per input colour.
  rgb_mat <- col2rgb(someColor)
  apply(rgb_mat, 2, function(chan) {
    rgb(red = chan[1], green = chan[2], blue = chan[3],
        alpha = alpha, maxColorValue = 255)
  })
}
# Plot performance summary of the mse object
#setMethod("summary",
#          signature(object = "MSE"),
# stats: summary statistics for the populations listed in object@targpop.
# Catch is summed across target populations; returns a list of matrices:
# late-period yield (Y), mean absolute annual catch change (AAVY),
# proportion of projection years in the green Kobe quadrant (Pgreen), plus
# the underlying C, F_FMSY and B_BMSY arrays.
stats<-function(object){
nsim<-object@nsim
nyears<-object@nyears
proyears<-object@proyears
nMPs<-object@nMPs
targpop<-object@targpop
# Total projection-period catch summed over target populations
C<-apply(array(object@C[,,targpop,(nyears+1):(nyears+proyears)],
c(nMPs,nsim,length(targpop),proyears)),c(1,2,4),sum)
# Absolute year-on-year catch change via matrix (linear) indexing of
# consecutive year pairs; ((x)^2)^0.5 is |x|
AAVY<-array(NA,c(nMPs,nsim,proyears-1))
ind1<-as.matrix(expand.grid(1:nMPs,1:nsim,1:(proyears-1)))
ind2<-as.matrix(expand.grid(1:nMPs,1:nsim,2:proyears))
AAVY[ind1]<-((C[ind1]-C[ind2])^2)^0.5
AAVY<-apply(AAVY,1:2,mean)
# Mean catch over the final five projection years
Y<-apply(C[,,(proyears-4):proyears],1:2,mean)
# NOTE(review): F_FMSY/B_BMSY are indexed with three subscripts here, while
# the ABT_PM metric functions index the same slots with four (MP, sim, pop,
# year) -- confirm the slot dimensionality this function expects.
F_FMSY<-object@F_FMSY[,,(nyears+1):(nyears+proyears)]
B_BMSY<-object@B_BMSY[,,(nyears+1):(nyears+proyears)]
Pgreen<-apply(array(as.integer(F_FMSY<1&B_BMSY>1),dim(B_BMSY)),1:2,mean)
list("Y"=Y,"AAVY"=AAVY,"Pgreen"=Pgreen,"Dep"=B_BMSY[,,proyears],"C"=C,"F_FMSY"=F_FMSY,"B_BMSY"=B_BMSY)
}#)
# anim8mov: render an animation of per-region statistics on a map of the BC
# coast (herring management regions), one frame per row of `stat`.
# NOTE(review): this function appears to be mid-refactor (see the commented
# "animov<-function(stat)" line): the OM and outfile arguments are never
# used, and `stat` is referenced before being defined in this scope -- the
# function cannot run as written.  It also depends on non-base helpers
# (tinter, importShapefile, tompoly, map, ani.pause), a hard-coded D: drive
# path, and a shapefile on disk.
anim8mov<-function(OM,outfile='bftanimov'){
#animov<-function(stat){
fac<-4
stat<-tinter(t(stat),fac)
UB<-1
LB<-0
nsim<-nrow(stat)
# NOTE(review): setwd() with an absolute machine-specific path -- confirm
Drive<-"D"
setwd(paste(Drive,':/HerringMSE/Data/',sep=""))
Lat<-c(48,55)
Lon<-c(-133,-122)
gridcol<-'grey'
axiscol<-'black'
landcol<-'grey'
Regions<-c("Straight of Georgia","W. Coast Van. Island","Central Coast", "Haida Gwaii", "Prince Rupert District")
RegCodes<-c("SOG","WCVI","CC","HG","PRD")
# Load management-area polygons and renumber PIDs into RegCodes order
MAs<-importShapefile('Assessment_Regions_2W_27_March_2009',readDBF=F)
MAnam<-c("PRD","CC","SOG","WCVI","A27","2W","HG")
MAs<-subset(MAs,MAs$PID%in%match(RegCodes,MAnam))
MAs$PID<-match(MAnam[MAs$PID],RegCodes)
dcolgrad<-500
dcols<-rainbow(dcolgrad+2,start=0.05,end=0.4)
# NOTE(review): this statcol and the bare paste() below are dead code --
# statcol is immediately overwritten by the sprintf() version
statcol<-1+ceiling((stat^0.5-LB^0.5)/(UB^0.5-LB^0.5)*dcolgrad)
paste("#ff0000",floor(stat*0.99*100),sep="")
statcol<-array(sprintf("#ff0000%02d", floor(stat*0.99*100)),dim(stat))
# One frame per row of stat: ocean background, shaded regions, coastline
for(j in 1:nsim){
plot(Lon,Lat,col="white",xlab="",ylab="",main="",axes=F)
xlimz<-c(-1000,1000)
ylimz<-xlimz
polygon(rep(xlimz,each=2),c(ylimz,ylimz[2:1]),col='azure',border='azure')
#           x<-(-122:-135)                                                   # the global 1 deg longitudes
#y<-47:56                                                         # the global 1 deg latitudes
#abline(h=y,col=gridcol)                                                 # add the 1 degree latitude lines
#abline(v=x,col=gridcol)                                                 # add the 1 degree longitude lines
#axis(1,at=x,labels=as.character(x),col=gridcol,col.axis=axiscol,cex.axis=1)
#axis(2,at=y,labels=as.character(y),col=gridcol,col.axis=axiscol,cex.axis=1)
#mtext(expression(paste("Longitude ",~degree~W,sep="")),side=1,line=2.5,outer=F,font=2,cex=14/12)
#mtext(expression(paste("Latitude ",~degree~N,sep="")),side=2,line=2.5,outer=F,font=2,cex=14/12)
tompoly(MAs,acol=statcol[j,],lcol=statcol[j,])
map(database = "worldHires", xlim=Lon, ylim=Lat,resolution = 0,add=T,fill=T,col=landcol)
map(database = "worldHires", xlim=Lon, ylim=Lat,resolution = 0,add=T,col=landcol)
legend('topright',legend=ceiling(j/5),text.col='white',cex=1.8,bty='n')
ani.pause()
}
#}
}
|
#Perform differential expression analysis of spf transcriptional data (Beisel and Storz 2011)
#Work directory should be the Miscellaneous_scripts folder - change accordingly
#Read table mapping E. coli genes and microarray probes
# NOTE(review): header=2 is unusual -- read.table's header argument expects
# TRUE/FALSE; confirm header=TRUE was intended.
genes.probes.table<-read.table("../Miscellaneous_data/Spot 42_transcriptional_data/ecoli_genechip2_genes_to_probes.txt",header=2)
#Verify that there is a single probe per gene (limited to genes present in the original expression compendium used for inference)
#Load expression matrix
load("../Inferelator_output_files/Ecoli_8sRNAs/params_and_input.RData")
# Gene IDs look like "<prefix>_<gene>"; the part after "_" is used below
ecoli.genes<-rownames(IN$exp.mat)
probe.counts<-c()
# NOTE(review): grep() does substring (regex) matching, so a gene name that
# is a prefix of another (e.g. "abc" vs "abcd") can match multiple rows --
# confirm exact matching (match/==) is not required here.
for(i in ecoli.genes)
{
current.gene<-strsplit(i,"_")[[1]][2]
gene.positions<-grep(current.gene,genes.probes.table$gene)
probe.counts<-c(probe.counts,length(gene.positions))
}
#There are 3818 genes (out of 4297) with one probe in the array
#Read the expression data (downloaded from NCBI GEO - accesion number GSE24875)
spf.matrix<-read.csv("../Miscellaneous_data/Spot 42_transcriptional_data/Spot 42_microarray_normalized_raw_data_Beisel_and _Storz_2011_GSE24875.csv",row.names=1)
#Replace probes IDs with gene names
# NOTE(review): rbind() inside the loop grows the matrix one row at a time
# (quadratic); preallocating or collecting rows in a list would be faster.
new.spf.expression.matrix<-c()
genes.present.in.matrix<-c()
for(i in ecoli.genes)
{
current.gene<-strsplit(i,"_")[[1]][2]
gene.position<-grep(current.gene,genes.probes.table[,1])
#If the current gene was found
if(length(gene.position)>0)
{
# NOTE(review): if grep() matched more than one row, current.probe has
# length > 1 and the == comparison below recycles -- confirm matches are
# always unique.
current.probe<-as.character(genes.probes.table[gene.position,2])
current.probe.position<-which(rownames(spf.matrix)==current.probe)
new.spf.expression.matrix<-rbind(new.spf.expression.matrix,spf.matrix[current.probe.position,])
genes.present.in.matrix<-c(genes.present.in.matrix,current.gene)
}
}
#Name rows in the new matrix
rownames(new.spf.expression.matrix)<-genes.present.in.matrix
#Remove genes that were absent (indicated with an "A") in any of the six experiments
# Even-numbered columns 2,4,...,12 hold the presence/absence ("P"/"A") flags
genes.presence<-sapply(1:nrow(new.spf.expression.matrix),function(x){length(which(new.spf.expression.matrix[x,seq(2,12,by=2)]=="P"))})
new.spf.expression.matrix2<-new.spf.expression.matrix[which(genes.presence == 6),]
#Delete the columns with the presence/absence information
new.spf.expression.matrix2<-new.spf.expression.matrix2[,-1*seq(from=2,to=12,by=2)]
#Perform differential expression analysis using bayesT (Baldi and Long, 2001)
#This step requires the cyberT R source code available at http://cybert.ics.uci.edu
source("../Rcode_Bayesian_Ttest/cyberTtest.R")
library("multtest")
#Run differential expression analysis
# numC/numE: 3 control and 3 experimental columns; bayesT comes from the
# sourced cyberT code above
diff.exp.analysis<-bayesT(new.spf.expression.matrix2,numC=3,numE=3,bayes=T,conf=7,ppde=T, doMulttest = T)
#Define diff. exp. genes (DEGs)
DEGs_spf<-rownames(diff.exp.analysis)[which(diff.exp.analysis$pVal <= 0.01)]
DEGs_spf<-DEGs_spf[order(DEGs_spf)]
write.csv(file="../Miscellaneous_data/Spot 42_transcriptional_data/spf_diff_exp_analysys_output.csv",diff.exp.analysis[DEGs_spf,],quote = F)
|
/Miscellaneous_scripts/differential_expression_analysis_Spot42.R
|
no_license
|
marioluisao/sRNA_networks
|
R
| false
| false
| 2,925
|
r
|
#Perform differential expression analysis of spf transcriptional data (Beisel and Storz 2011)
#Work directory should be the Miscellaneous_scripts folder - change accordingly
#Read table mapping E. coli genes and microarray probes
# NOTE(review): header=2 is unusual -- read.table's header argument expects
# TRUE/FALSE; confirm header=TRUE was intended.
genes.probes.table<-read.table("../Miscellaneous_data/Spot 42_transcriptional_data/ecoli_genechip2_genes_to_probes.txt",header=2)
#Verify that there is a single probe per gene (limited to genes present in the original expression compendium used for inference)
#Load expression matrix
load("../Inferelator_output_files/Ecoli_8sRNAs/params_and_input.RData")
# Gene IDs look like "<prefix>_<gene>"; the part after "_" is used below
ecoli.genes<-rownames(IN$exp.mat)
probe.counts<-c()
# NOTE(review): grep() does substring (regex) matching, so a gene name that
# is a prefix of another (e.g. "abc" vs "abcd") can match multiple rows --
# confirm exact matching (match/==) is not required here.
for(i in ecoli.genes)
{
current.gene<-strsplit(i,"_")[[1]][2]
gene.positions<-grep(current.gene,genes.probes.table$gene)
probe.counts<-c(probe.counts,length(gene.positions))
}
#There are 3818 genes (out of 4297) with one probe in the array
#Read the expression data (downloaded from NCBI GEO - accesion number GSE24875)
spf.matrix<-read.csv("../Miscellaneous_data/Spot 42_transcriptional_data/Spot 42_microarray_normalized_raw_data_Beisel_and _Storz_2011_GSE24875.csv",row.names=1)
#Replace probes IDs with gene names
# NOTE(review): rbind() inside the loop grows the matrix one row at a time
# (quadratic); preallocating or collecting rows in a list would be faster.
new.spf.expression.matrix<-c()
genes.present.in.matrix<-c()
for(i in ecoli.genes)
{
current.gene<-strsplit(i,"_")[[1]][2]
gene.position<-grep(current.gene,genes.probes.table[,1])
#If the current gene was found
if(length(gene.position)>0)
{
# NOTE(review): if grep() matched more than one row, current.probe has
# length > 1 and the == comparison below recycles -- confirm matches are
# always unique.
current.probe<-as.character(genes.probes.table[gene.position,2])
current.probe.position<-which(rownames(spf.matrix)==current.probe)
new.spf.expression.matrix<-rbind(new.spf.expression.matrix,spf.matrix[current.probe.position,])
genes.present.in.matrix<-c(genes.present.in.matrix,current.gene)
}
}
#Name rows in the new matrix
rownames(new.spf.expression.matrix)<-genes.present.in.matrix
#Remove genes that were absent (indicated with an "A") in any of the six experiments
# Even-numbered columns 2,4,...,12 hold the presence/absence ("P"/"A") flags
genes.presence<-sapply(1:nrow(new.spf.expression.matrix),function(x){length(which(new.spf.expression.matrix[x,seq(2,12,by=2)]=="P"))})
new.spf.expression.matrix2<-new.spf.expression.matrix[which(genes.presence == 6),]
#Delete the columns with the presence/absence information
new.spf.expression.matrix2<-new.spf.expression.matrix2[,-1*seq(from=2,to=12,by=2)]
#Perform differential expression analysis using bayesT (Baldi and Long, 2001)
#This step requires the cyberT R source code available at http://cybert.ics.uci.edu
source("../Rcode_Bayesian_Ttest/cyberTtest.R")
library("multtest")
#Run differential expression analysis
# numC/numE: 3 control and 3 experimental columns; bayesT comes from the
# sourced cyberT code above
diff.exp.analysis<-bayesT(new.spf.expression.matrix2,numC=3,numE=3,bayes=T,conf=7,ppde=T, doMulttest = T)
#Define diff. exp. genes (DEGs)
DEGs_spf<-rownames(diff.exp.analysis)[which(diff.exp.analysis$pVal <= 0.01)]
DEGs_spf<-DEGs_spf[order(DEGs_spf)]
write.csv(file="../Miscellaneous_data/Spot 42_transcriptional_data/spf_diff_exp_analysys_output.csv",diff.exp.analysis[DEGs_spf,],quote = F)
|
# Script setup: working directory and packages for the fresh-leaf trait
# modelling workflow.
# NOTE(review): setwd() with an absolute machine-specific path makes the
# script non-portable -- consider a project-relative path or here::here().
setwd("C:/Users/kotha020/Dropbox/PostdocProjects/FreshLeafModels")
library(spectrolab)
library(ggplot2)
library(FNN)
library(lme4)
library(ggpubr)
#################################
## import TRY data
## TRY trait number reference
# SLA no petiole: 3115*
# LDMC: 47*
# Leaf N: 14*
# Leaf C: 13*
# Solubles: 85*
# Hemicellulose: 94*
# Cellulose: 92*
# Lignin: 87*
# Chlorophyll: 164*
# Chlorophyll a: 3474*
# Chlorophyll b: 3475*
# Carotenoids: 809*
# Al: 249*
# B: 250*
# Ca: 252*
# Cu: 255*
# Fe: 256*
# K: 44*
# Mg: 257*
# Mn: 258*
# Na: 260*
# P: 15*
# Zn: 268*
# Phenols: 147
# Tannins: 148
# ## leaf structure -- includes LDMC and SLA (TRY request 14553)
# TRY_struc<-read.table("TraitData/TRY/TRYStructure/14553.txt",
# sep = "\t",fill=T,header=T,quote="")
#
# TRY_struc_sub<-TRY_struc[which(TRY_struc$TraitID %in% c(3115,47)),]
# TRY_struc_sub$StdValue<-as.numeric(as.character(TRY_struc_sub$StdValue))
# TRY_struc_sub$ErrorRisk<-as.numeric(as.character(TRY_struc_sub$ErrorRisk))
# TRY_struc_sub<-TRY_struc_sub[!is.na(TRY_struc_sub$StdValue),]
# TRY_struc_sub<-TRY_struc_sub[which(TRY_struc_sub$ErrorRisk<3),]
# TRY_struc_sub<-TRY_struc_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_LDMC<-TRY_struc_sub[which(TRY_struc_sub$TraitID==47),]
# TRY_SLA<-TRY_struc_sub[which(TRY_struc_sub$TraitID==3115),]
#
# write.csv(TRY_LDMC,"TraitData/TRY/TRY_LDMC.csv")
# write.csv(TRY_SLA,"TraitData/TRY/TRY_SLA.csv")
#
# ## leaf macronutrients -- includes C, N, and P (TRY request 14554)
# TRY_CNP<-read.table("TraitData/TRY/TRYNutrients/14554.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_CNP_sub<-TRY_CNP[which(TRY_CNP$TraitID %in% 13:15),]
# TRY_CNP_sub$StdValue<-as.numeric(as.character(TRY_CNP_sub$StdValue))
# TRY_CNP_sub$ErrorRisk<-as.numeric(as.character(TRY_CNP_sub$ErrorRisk))
# TRY_CNP_sub<-TRY_CNP_sub[!is.na(TRY_CNP_sub$StdValue),]
# TRY_CNP_sub<-TRY_CNP_sub[which(TRY_CNP_sub$ErrorRisk<3),]
# TRY_CNP_sub<-TRY_CNP_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_C<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==13),]
# TRY_N<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==14),]
# TRY_P<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==15),]
#
# write.csv(TRY_C,"TraitData/TRY/TRY_C.csv")
# write.csv(TRY_N,"TraitData/TRY/TRY_N.csv")
# write.csv(TRY_P,"TraitData/TRY/TRY_P.csv")
#
# ## leaf carbon fractions (TRY request 14555)
# TRY_CFrac<-read.table("TraitData/TRY/TRYCFrac/14555.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_CFrac_sub<-TRY_CFrac[which(TRY_CFrac$TraitID %in% c(85,87,92,94)),]
# TRY_CFrac_sub$StdValue<-as.numeric(as.character(TRY_CFrac_sub$StdValue))
# TRY_CFrac_sub$ErrorRisk<-as.numeric(as.character(TRY_CFrac_sub$ErrorRisk))
# TRY_CFrac_sub<-TRY_CFrac_sub[!is.na(TRY_CFrac_sub$StdValue),]
# TRY_CFrac_sub<-TRY_CFrac_sub[which(TRY_CFrac_sub$ErrorRisk<3),]
# TRY_CFrac_sub<-TRY_CFrac_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_solubles<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==85),]
# TRY_lignin<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==87),]
# TRY_cellulose<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==92),]
# TRY_hemicellulose<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==94),]
#
# write.csv(TRY_solubles,"TraitData/TRY/TRY_solubles.csv")
# write.csv(TRY_lignin,"TraitData/TRY/TRY_lignin.csv")
# write.csv(TRY_cellulose,"TraitData/TRY/TRY_cellulose.csv")
# write.csv(TRY_hemicellulose,"TraitData/TRY/TRY_hemicellulose.csv")
#
## leaf pigments (TRY request 14556)
# TRY_pigments<-read.table("TraitData/TRY/TRYPigments/14556.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_pigments_sub<-TRY_pigments[which(TRY_pigments$TraitID %in% c(164,809,3474,3475)),]
# TRY_pigments_sub$OrigValueStr<-as.numeric(as.character(TRY_pigments_sub$OrigValueStr))
# TRY_pigments_sub$StdValue<-as.numeric(as.character(TRY_pigments_sub$StdValue))
# TRY_pigments_sub$ErrorRisk<-as.numeric(as.character(TRY_pigments_sub$ErrorRisk))
## all records for ChlA, ChlB, and car have only original values
## and are missing error risk assessments,]
# TRY_Chl<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==164),]
# TRY_Chl<-TRY_Chl[which(TRY_Chl$ErrorRisk<3),]
# TRY_Chl<-TRY_Chl[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# micro_ChlA<-which(TRY_ChlA$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_ChlA<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==3474),]
# TRY_ChlA$StdValue<-TRY_ChlA$OrigValueStr
# TRY_ChlA$StdValue[micro_chlA]<-TRY_ChlA$OrigValueStr[micro_chlA]/1000
# TRY_ChlA$UnitName<-"mg g-1"
# TRY_ChlA<-TRY_ChlA[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
#
# micro_ChlB<-which(TRY_ChlB$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_ChlB<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==3475),]
# TRY_ChlB$StdValue<-TRY_ChlB$OrigValueStr
# TRY_ChlB$StdValue[micro_ChlB]<-TRY_ChlB$OrigValueStr[micro_ChlB]/1000
# TRY_ChlB$UnitName<-"mg g-1"
# TRY_ChlB<-TRY_ChlB[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# micro_Car<-which(TRY_Car$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_Car<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==809),]
# TRY_Car$StdValue<-TRY_Car$OrigValueStr
# TRY_Car$StdValue[micro_Car]<-TRY_Car$OrigValueStr[micro_Car]/1000
# TRY_Car$UnitName<-"mg g-1"
# TRY_Car<-TRY_Car[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# write.csv(TRY_Chl,"TraitData/TRY/TRY_Chl.csv")
# write.csv(TRY_ChlA,"TraitData/TRY/TRY_ChlA.csv")
# write.csv(TRY_ChlB,"TraitData/TRY/TRY_ChlB.csv")
# write.csv(TRY_Car,"TraitData/TRY/TRY_Car.csv")
#
# ## leaf micronutrients (TRY request 14557)
# TRY_Micro<-read.table("TraitData/TRY/TRYMicro/14557.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_Micro_sub<-TRY_Micro[which(TRY_Micro$TraitID %in% c(249,250,252,255,256,44,
# 257,258,260,268)),]
# TRY_Micro_sub$StdValue<-as.numeric(as.character(TRY_Micro_sub$StdValue))
# TRY_Micro_sub$ErrorRisk<-as.numeric(as.character(TRY_Micro_sub$ErrorRisk))
# TRY_Micro_sub<-TRY_Micro_sub[!is.na(TRY_Micro_sub$StdValue),]
# TRY_Micro_sub<-TRY_Micro_sub[which(TRY_Micro_sub$ErrorRisk<3),]
# TRY_Micro_sub<-TRY_Micro_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_Al<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==249),]
# TRY_B<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==250),]
# TRY_Ca<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==252),]
# TRY_Cu<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==255),]
# TRY_Fe<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==256),]
# TRY_K<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==44),]
# TRY_Mg<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==257),]
# TRY_Mn<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==258),]
# TRY_Na<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==260),]
# TRY_Zn<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==268),]
#
# write.csv(TRY_Al,"TraitData/TRY/TRY_Al.csv")
# write.csv(TRY_B,"TraitData/TRY/TRY_B.csv")
# write.csv(TRY_Ca,"TraitData/TRY/TRY_Ca.csv")
# write.csv(TRY_Cu,"TraitData/TRY/TRY_Cu.csv")
# write.csv(TRY_Fe,"TraitData/TRY/TRY_Fe.csv")
# write.csv(TRY_K,"TraitData/TRY/TRY_K.csv")
# write.csv(TRY_Mn,"TraitData/TRY/TRY_Mn.csv")
# write.csv(TRY_Mg,"TraitData/TRY/TRY_Mg.csv")
# write.csv(TRY_Na,"TraitData/TRY/TRY_Na.csv")
# write.csv(TRY_Zn,"TraitData/TRY/TRY_Zn.csv")
#####################################
## read CABO trait data
## Read the CABO spectra-plus-traits object (spectrolab) and pull out the
## trait metadata as a data frame.
ref.traits <- readRDS("ProcessedSpectra/all_ref_and_traits.rds")
## The Pardo dataset has no trait data (yet), so drop those spectra.
ref.traits <- ref.traits[-which(meta(ref.traits)$project == "2019-Pardo-MSc-UdeM")]
trait.df <- meta(ref.traits)

## Read the pre-processed TRY trait tables written by the (commented-out)
## preprocessing above; each CSV becomes a data frame named TRY_<trait>.
try_tables <- c("SLA", "LDMC", "N", "C", "solubles", "hemicellulose",
                "cellulose", "lignin", "Chl", "ChlA", "ChlB", "Car",
                "Al", "Ca", "Cu", "Fe", "K", "Mg", "Mn", "Na", "P", "Zn")
for (tab in try_tables) {
  assign(paste0("TRY_", tab),
         read.csv(file.path("TraitData", "TRY", paste0("TRY_", tab, ".csv"))))
}

## Invert SLA to get LMA -- assumes TRY's StdValue is in m2 kg-1 so that
## 1/SLA matches the CABO LMA axis (kg m-2); TODO confirm against TRY units.
TRY_SLA$LMA <- 1 / TRY_SLA$StdValue
####################################
## plot trait distributions
## Tag every CABO record so it can be distinguished from TRY rows later.
trait.df$cat <- "CABO"
## Quick exploratory look: Chl a distributions split by CABO project.
ggplot(trait.df, aes(x = chlA_mass, color = project)) +
  geom_density(size = 1.25) +
  theme_bw() +
  scale_color_brewer(palette = "Set3")
####################################
## CABO-vs-TRY trait density plots
##
## All 21 panels below share one structure (CABO density overlaid on the
## TRY density when TRY data exist), so the plot is built once in a helper
## and called per trait. This replaces ~450 lines of copy-pasted ggplot
## code and fixes one copy-paste bug: the Total Chl panel previously
## labelled the CABO sample size with chlA_mass counts instead of the
## plotted chl_mass.

## Build one CABO-vs-TRY density comparison plot.
##
## cabo_col: name of the trait.df column holding CABO values; also used for
##   the CABO sample-size label.
## xlab: x-axis label (string or plotmath expression).
## try_values: numeric vector of TRY values already converted to CABO units,
##   or NULL when no TRY data are available (e.g. EWT).
## legend_pos: legend position in normalized panel coordinates.
## keep_y_title: FALSE blanks the y-axis title (interior grid panels).
## transparent_legend: TRUE draws the legend without a background box.
## xlim: optional x-axis zoom, applied with coord_cartesian() so the
##   density estimate itself is computed from all data.
## Returns: a ggplot object.
density_compare <- function(cabo_col, xlab, try_values = NULL,
                            legend_pos = c(0.7, 0.7), keep_y_title = TRUE,
                            transparent_legend = FALSE, xlim = NULL) {
  plot_df <- data.frame(value = trait.df[[cabo_col]], cat = "CABO")
  legend_labels <- paste0("CABO (n=", sum(!is.na(trait.df[[cabo_col]])), ")")
  if (!is.null(try_values)) {
    plot_df <- rbind(plot_df, data.frame(value = try_values, cat = "TRY"))
    ## NA count is unaffected by the unit conversions applied upstream, so
    ## counting try_values matches the original per-panel labels.
    legend_labels <- c(legend_labels,
                       paste0("TRY (n=", sum(!is.na(try_values)), ")"))
  }
  p <- ggplot(plot_df, aes(x = value, color = cat)) +
    geom_density(size = 1.5, bounds = c(0, Inf)) +
    theme_bw() +
    theme(legend.title = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          text = element_text(size = 30),
          legend.position = legend_pos,
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank()) +
    labs(y = "Density", x = xlab) +
    scale_color_discrete(labels = legend_labels)
  if (!keep_y_title) {
    p <- p + theme(axis.title.y = element_blank())
  }
  if (transparent_legend) {
    p <- p + theme(legend.background = element_rect(fill = "transparent"))
  }
  if (!is.null(xlim)) {
    p <- p + coord_cartesian(xlim = xlim)
  }
  p
}

## Structural / economic traits. TRY StdValue conversions: N, C, and the
## carbon fractions are divided by 10 (presumably mg g-1 -> %) and LDMC is
## multiplied by 1000 (presumably g g-1 -> mg g-1) -- TODO confirm against
## TRY unit metadata.
LMA_density <- density_compare("LMA", expression("LMA (kg m"^-2*")"),
                               try_values = TRY_SLA$LMA)
EWT_density <- density_compare("EWT", "EWT (mm)", keep_y_title = FALSE)
LDMC_density <- density_compare("LDMC", expression("LDMC (mg g"^-1*")"),
                                try_values = TRY_LDMC$StdValue * 1000,
                                legend_pos = c(0.75, 0.7),
                                keep_y_title = FALSE)
Nmass_density <- density_compare("Nmass", expression("N (%)"),
                                 try_values = TRY_N$StdValue / 10)
Cmass_density <- density_compare("Cmass", expression("C (%)"),
                                 try_values = TRY_C$StdValue / 10,
                                 legend_pos = c(0.25, 0.7),
                                 keep_y_title = FALSE)
solubles_mass_density <- density_compare("solubles_mass",
                                         expression("Solubles (%)"),
                                         try_values = TRY_solubles$StdValue / 10,
                                         legend_pos = c(0.25, 0.92),
                                         keep_y_title = FALSE,
                                         transparent_legend = TRUE)
hemicellulose_mass_density <- density_compare("hemicellulose_mass",
                                              expression("Hemicellulose (%)"),
                                              try_values = TRY_hemicellulose$StdValue / 10,
                                              legend_pos = c(0.7, 0.8))
cellulose_mass_density <- density_compare("cellulose_mass",
                                          expression("Cellulose (%)"),
                                          try_values = TRY_cellulose$StdValue / 10,
                                          legend_pos = c(0.7, 0.8),
                                          keep_y_title = FALSE)
lignin_mass_density <- density_compare("lignin_mass",
                                       expression("Lignin (%)"),
                                       try_values = TRY_lignin$StdValue / 10,
                                       legend_pos = c(0.7, 0.8),
                                       keep_y_title = FALSE)

## Pigments. Total chlorophyll is the sum of the a and b pools; the CABO
## sample-size label now correctly counts chl_mass (bug fix -- the original
## counted chlA_mass).
trait.df$chl_mass <- trait.df$chlA_mass + trait.df$chlB_mass
chl_mass_density <- density_compare("chl_mass",
                                    expression("Total Chl (mg g"^-1*")"),
                                    try_values = TRY_Chl$StdValue)
chlA_mass_density <- density_compare("chlA_mass",
                                     expression(paste("Chl ", italic("a"), " (mg g"^-1*")")),
                                     try_values = TRY_ChlA$StdValue)
chlB_mass_density <- density_compare("chlB_mass",
                                     expression(paste("Chl ", italic("b"), " (mg g"^-1*")")),
                                     try_values = TRY_ChlB$StdValue)
car_mass_density <- density_compare("car_mass",
                                    expression("Carotenoids (mg g"^-1*")"),
                                    try_values = TRY_Car$StdValue,
                                    keep_y_title = FALSE)

## Elemental composition. Al and Na have extreme right tails, so their
## panels are zoomed via xlim (coord_cartesian leaves the density itself
## computed from all data, as in the original).
Al_mass_density <- density_compare("Al_mass",
                                   expression(paste("Al (mg g"^-1*")")),
                                   try_values = TRY_Al$StdValue,
                                   keep_y_title = FALSE, xlim = c(0, 1))
Ca_mass_density <- density_compare("Ca_mass",
                                   expression(paste("Ca (mg g"^-1*")")),
                                   try_values = TRY_Ca$StdValue)
Cu_mass_density <- density_compare("Cu_mass",
                                   expression(paste("Cu (mg g"^-1*")")),
                                   try_values = TRY_Cu$StdValue,
                                   keep_y_title = FALSE)
Fe_mass_density <- density_compare("Fe_mass",
                                   expression(paste("Fe (mg g"^-1*")")),
                                   try_values = TRY_Fe$StdValue,
                                   keep_y_title = FALSE)
K_mass_density <- density_compare("K_mass",
                                  expression(paste("K (mg g"^-1*")")),
                                  try_values = TRY_K$StdValue)
Mg_mass_density <- density_compare("Mg_mass",
                                   expression(paste("Mg (mg g"^-1*")")),
                                   try_values = TRY_Mg$StdValue,
                                   keep_y_title = FALSE)
Mn_mass_density <- density_compare("Mn_mass",
                                   expression(paste("Mn (mg g"^-1*")")),
                                   try_values = TRY_Mn$StdValue,
                                   keep_y_title = FALSE)
Na_mass_density <- density_compare("Na_mass",
                                   expression(paste("Na (mg g"^-1*")")),
                                   try_values = TRY_Na$StdValue,
                                   xlim = c(0, 15))
P_mass_density <- density_compare("P_mass",
                                  expression(paste("P (mg g"^-1*")")),
                                  try_values = TRY_P$StdValue,
                                  keep_y_title = FALSE)
Zn_mass_density <- density_compare("Zn_mass",
                                   expression(paste("Zn (mg g"^-1*")")),
                                   try_values = TRY_Zn$StdValue,
                                   keep_y_title = FALSE)
## Assemble the 21 panels into a 3 x 7 grid and write them to PDF.
## FIX: the ggarrange() result must be print()ed explicitly -- relying on
## top-level auto-printing fails when this script is run via source()
## (print.eval defaults to FALSE), which would leave an empty PDF.
pdf("Images/density_plot.pdf", width = 21, height = 30)
print(ggarrange(plotlist = list(LMA_density, EWT_density, LDMC_density,
                                Nmass_density, Cmass_density, solubles_mass_density,
                                hemicellulose_mass_density, cellulose_mass_density,
                                lignin_mass_density, chl_mass_density, car_mass_density,
                                Al_mass_density, Ca_mass_density, Cu_mass_density,
                                Fe_mass_density, K_mass_density, Mg_mass_density,
                                Mn_mass_density, Na_mass_density, P_mass_density,
                                Zn_mass_density),
                ncol = 3, nrow = 7))
dev.off()
############################
## variance partitioning
## Partition each trait's variance across taxonomy and site with a
## random-effects-only model: trait ~ 1 + (1|family/genus/species) + (1|site).
variances <- data.frame(effect = c("species", "genus", "site", "family", "residual"))
## NOTE(review): the effect labels above assume lme4 returns the variance
## components in exactly this order; verify against
## as.data.frame(VarCorr(fit))$grp before trusting the stacked plot.
varpart_traits <- c("LMA", "LDMC", "Nmass", "EWT", "chlA_mass",
                    "hemicellulose_mass", "lignin_mass")
for (tr in varpart_traits) {
  fit <- lmer(as.formula(paste(tr, "~1+(1|family/genus/species)+(1|site)")),
              data = trait.df)
  ## Keep each fitted model under its original name (varpart_<trait>) for
  ## backward compatibility with any downstream inspection.
  assign(paste0("varpart_", tr), fit)
  variances[[tr]] <- as.data.frame(VarCorr(fit))$vcov
}
## FIX: gather() lives in tidyr, which is never attached at the top of this
## script -- call it through the namespace so the script does not die with
## "could not find function". (gather() is superseded by pivot_longer(),
## kept here for identical output.)
variances_long <- tidyr::gather(variances, trait, variance,
                                LMA:lignin_mass, factor_key = TRUE)
## Stacked proportions of variance per trait; print() so the plot also
## renders under source().
print(ggplot(variances_long, aes(x = trait, y = variance, fill = effect)) +
        geom_bar(position = "fill", stat = "identity") +
        scale_y_continuous(labels = scales::percent_format()))
|
/07 compare_trait_distributions.R
|
no_license
|
ShanKothari/CABO-trait-models
|
R
| false
| false
| 34,153
|
r
|
## Project root is hard-coded. NOTE(review): setwd() with an absolute user
## path breaks portability -- consider running from the project directory
## (or here::here()) instead.
setwd("C:/Users/kotha020/Dropbox/PostdocProjects/FreshLeafModels")
library(spectrolab)  # spectra objects + meta() accessor
library(ggplot2)     # density plots
library(FNN)         # nearest-neighbor utilities
library(lme4)        # lmer() for variance partitioning
library(ggpubr)      # ggarrange() for the multi-panel figure
## NOTE(review): gather() (tidyr) is used later but tidyr is not attached
## here -- verify it is loaded or namespace-qualify the call.
#################################
## import TRY data
## TRY trait number reference
# SLA no petiole: 3115*
# LDMC: 47*
# Leaf N: 14*
# Leaf C: 13*
# Solubles: 85*
# Hemicellulose: 94*
# Cellulose: 92*
# Lignin: 87*
# Chlorophyll: 164*
# Chlorophyll a: 3474*
# Chlorophyll b: 3475*
# Carotenoids: 809*
# Al: 249*
# B: 250*
# Ca: 252*
# Cu: 255*
# Fe: 256*
# K: 44*
# Mg: 257*
# Mn: 258*
# Na: 260*
# P: 15*
# Zn: 268*
# Phenols: 147
# Tannins: 148
# ## leaf structure -- includes LDMC and SLA (TRY request 14553)
# TRY_struc<-read.table("TraitData/TRY/TRYStructure/14553.txt",
# sep = "\t",fill=T,header=T,quote="")
#
# TRY_struc_sub<-TRY_struc[which(TRY_struc$TraitID %in% c(3115,47)),]
# TRY_struc_sub$StdValue<-as.numeric(as.character(TRY_struc_sub$StdValue))
# TRY_struc_sub$ErrorRisk<-as.numeric(as.character(TRY_struc_sub$ErrorRisk))
# TRY_struc_sub<-TRY_struc_sub[!is.na(TRY_struc_sub$StdValue),]
# TRY_struc_sub<-TRY_struc_sub[which(TRY_struc_sub$ErrorRisk<3),]
# TRY_struc_sub<-TRY_struc_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_LDMC<-TRY_struc_sub[which(TRY_struc_sub$TraitID==47),]
# TRY_SLA<-TRY_struc_sub[which(TRY_struc_sub$TraitID==3115),]
#
# write.csv(TRY_LDMC,"TraitData/TRY/TRY_LDMC.csv")
# write.csv(TRY_SLA,"TraitData/TRY/TRY_SLA.csv")
#
# ## leaf macronutrients -- includes C, N, and P (TRY request 14554)
# TRY_CNP<-read.table("TraitData/TRY/TRYNutrients/14554.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_CNP_sub<-TRY_CNP[which(TRY_CNP$TraitID %in% 13:15),]
# TRY_CNP_sub$StdValue<-as.numeric(as.character(TRY_CNP_sub$StdValue))
# TRY_CNP_sub$ErrorRisk<-as.numeric(as.character(TRY_CNP_sub$ErrorRisk))
# TRY_CNP_sub<-TRY_CNP_sub[!is.na(TRY_CNP_sub$StdValue),]
# TRY_CNP_sub<-TRY_CNP_sub[which(TRY_CNP_sub$ErrorRisk<3),]
# TRY_CNP_sub<-TRY_CNP_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_C<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==13),]
# TRY_N<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==14),]
# TRY_P<-TRY_CNP_sub[which(TRY_CNP_sub$TraitID==15),]
#
# write.csv(TRY_C,"TraitData/TRY/TRY_C.csv")
# write.csv(TRY_N,"TraitData/TRY/TRY_N.csv")
# write.csv(TRY_P,"TraitData/TRY/TRY_P.csv")
#
# ## leaf carbon fractions (TRY request 14555)
# TRY_CFrac<-read.table("TraitData/TRY/TRYCFrac/14555.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_CFrac_sub<-TRY_CFrac[which(TRY_CFrac$TraitID %in% c(85,87,92,94)),]
# TRY_CFrac_sub$StdValue<-as.numeric(as.character(TRY_CFrac_sub$StdValue))
# TRY_CFrac_sub$ErrorRisk<-as.numeric(as.character(TRY_CFrac_sub$ErrorRisk))
# TRY_CFrac_sub<-TRY_CFrac_sub[!is.na(TRY_CFrac_sub$StdValue),]
# TRY_CFrac_sub<-TRY_CFrac_sub[which(TRY_CFrac_sub$ErrorRisk<3),]
# TRY_CFrac_sub<-TRY_CFrac_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_solubles<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==85),]
# TRY_lignin<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==87),]
# TRY_cellulose<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==92),]
# TRY_hemicellulose<-TRY_CFrac_sub[which(TRY_CFrac_sub$TraitID==94),]
#
# write.csv(TRY_solubles,"TraitData/TRY/TRY_solubles.csv")
# write.csv(TRY_lignin,"TraitData/TRY/TRY_lignin.csv")
# write.csv(TRY_cellulose,"TraitData/TRY/TRY_cellulose.csv")
# write.csv(TRY_hemicellulose,"TraitData/TRY/TRY_hemicellulose.csv")
#
## leaf pigments (TRY request 14556)
# TRY_pigments<-read.table("TraitData/TRY/TRYPigments/14556.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_pigments_sub<-TRY_pigments[which(TRY_pigments$TraitID %in% c(164,809,3474,3475)),]
# TRY_pigments_sub$OrigValueStr<-as.numeric(as.character(TRY_pigments_sub$OrigValueStr))
# TRY_pigments_sub$StdValue<-as.numeric(as.character(TRY_pigments_sub$StdValue))
# TRY_pigments_sub$ErrorRisk<-as.numeric(as.character(TRY_pigments_sub$ErrorRisk))
## all records for ChlA, ChlB, and car have only original values
## and are missing error risk assessments,]
# TRY_Chl<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==164),]
# TRY_Chl<-TRY_Chl[which(TRY_Chl$ErrorRisk<3),]
# TRY_Chl<-TRY_Chl[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_ChlA<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==3474),]
# micro_ChlA<-which(TRY_ChlA$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_ChlA$StdValue<-TRY_ChlA$OrigValueStr
# TRY_ChlA$StdValue[micro_ChlA]<-TRY_ChlA$OrigValueStr[micro_ChlA]/1000
# TRY_ChlA$UnitName<-"mg g-1"
# TRY_ChlA<-TRY_ChlA[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
#
# TRY_ChlB<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==3475),]
# micro_ChlB<-which(TRY_ChlB$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_ChlB$StdValue<-TRY_ChlB$OrigValueStr
# TRY_ChlB$StdValue[micro_ChlB]<-TRY_ChlB$OrigValueStr[micro_ChlB]/1000
# TRY_ChlB$UnitName<-"mg g-1"
# TRY_ChlB<-TRY_ChlB[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_Car<-TRY_pigments_sub[which(TRY_pigments_sub$TraitID==809),]
# micro_Car<-which(TRY_Car$OrigUnitStr %in% c("micro g / g","micro g/g"))
# TRY_Car$StdValue<-TRY_Car$OrigValueStr
# TRY_Car$StdValue[micro_Car]<-TRY_Car$OrigValueStr[micro_Car]/1000
# TRY_Car$UnitName<-"mg g-1"
# TRY_Car<-TRY_Car[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# write.csv(TRY_Chl,"TraitData/TRY/TRY_Chl.csv")
# write.csv(TRY_ChlA,"TraitData/TRY/TRY_ChlA.csv")
# write.csv(TRY_ChlB,"TraitData/TRY/TRY_ChlB.csv")
# write.csv(TRY_Car,"TraitData/TRY/TRY_Car.csv")
#
# ## leaf micronutrients (TRY request 14557)
# TRY_Micro<-read.table("TraitData/TRY/TRYMicro/14557.txt",
# sep = "\t",fill=T,header=T,quote = "")
#
# TRY_Micro_sub<-TRY_Micro[which(TRY_Micro$TraitID %in% c(249,250,252,255,256,44,
# 257,258,260,268)),]
# TRY_Micro_sub$StdValue<-as.numeric(as.character(TRY_Micro_sub$StdValue))
# TRY_Micro_sub$ErrorRisk<-as.numeric(as.character(TRY_Micro_sub$ErrorRisk))
# TRY_Micro_sub<-TRY_Micro_sub[!is.na(TRY_Micro_sub$StdValue),]
# TRY_Micro_sub<-TRY_Micro_sub[which(TRY_Micro_sub$ErrorRisk<3),]
# TRY_Micro_sub<-TRY_Micro_sub[,c("DatasetID","SpeciesName","AccSpeciesID","ObservationID",
# "ObsDataID","TraitID","TraitName","StdValue","UnitName","ErrorRisk")]
#
# TRY_Al<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==249),]
# TRY_B<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==250),]
# TRY_Ca<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==252),]
# TRY_Cu<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==255),]
# TRY_Fe<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==256),]
# TRY_K<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==44),]
# TRY_Mg<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==257),]
# TRY_Mn<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==258),]
# TRY_Na<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==260),]
# TRY_Zn<-TRY_Micro_sub[which(TRY_Micro_sub$TraitID==268),]
#
# write.csv(TRY_Al,"TraitData/TRY/TRY_Al.csv")
# write.csv(TRY_B,"TraitData/TRY/TRY_B.csv")
# write.csv(TRY_Ca,"TraitData/TRY/TRY_Ca.csv")
# write.csv(TRY_Cu,"TraitData/TRY/TRY_Cu.csv")
# write.csv(TRY_Fe,"TraitData/TRY/TRY_Fe.csv")
# write.csv(TRY_K,"TraitData/TRY/TRY_K.csv")
# write.csv(TRY_Mn,"TraitData/TRY/TRY_Mn.csv")
# write.csv(TRY_Mg,"TraitData/TRY/TRY_Mg.csv")
# write.csv(TRY_Na,"TraitData/TRY/TRY_Na.csv")
# write.csv(TRY_Zn,"TraitData/TRY/TRY_Zn.csv")
#####################################
## Read the CABO spectra-plus-traits object (spectrolab) and pull out the
## trait metadata as a data frame.
ref.traits <- readRDS("ProcessedSpectra/all_ref_and_traits.rds")
## The Pardo dataset has no trait data (yet), so drop those spectra.
ref.traits <- ref.traits[-which(meta(ref.traits)$project == "2019-Pardo-MSc-UdeM")]
trait.df <- meta(ref.traits)

## Read the pre-processed TRY trait tables; each CSV becomes TRY_<trait>.
try_tables <- c("SLA", "LDMC", "N", "C", "solubles", "hemicellulose",
                "cellulose", "lignin", "Chl", "ChlA", "ChlB", "Car",
                "Al", "Ca", "Cu", "Fe", "K", "Mg", "Mn", "Na", "P", "Zn")
for (tab in try_tables) {
  assign(paste0("TRY_", tab),
         read.csv(file.path("TraitData", "TRY", paste0("TRY_", tab, ".csv"))))
}

## Invert SLA to get LMA -- assumes TRY's StdValue is in m2 kg-1 so that
## 1/SLA matches the CABO LMA axis (kg m-2); TODO confirm against TRY units.
TRY_SLA$LMA <- 1 / TRY_SLA$StdValue
####################################
## plot trait distributions
## Tag every CABO row with a constant source label; used to distinguish
## CABO from TRY observations in the comparison plots below.
trait.df$cat<-"CABO"
## Quick exploratory look: chlorophyll-a distributions split by project
## (not saved to file; diagnostic only)
ggplot(data=trait.df,
       aes(x=chlA_mass,color=project))+
  geom_density(size=1.25)+
  theme_bw()+
  scale_color_brewer(palette="Set3")
## ---------------------------------------------------------------------------
## Trait density plots: CABO vs. TRY
##
## Every panel follows the same recipe -- stack the CABO and TRY values,
## draw a density bounded at zero, and label the legend with per-source
## sample sizes -- so the 23 near-identical copy-paste sections are replaced
## by one helper plus one call per trait. Differences from the original:
##   * the total-chlorophyll panel now reports the CABO n of chl_mass itself
##     rather than chlA_mass (bug fix);
##   * geom_density(linewidth=) replaces the deprecated size= argument
##     (the bounds= argument already requires ggplot2 >= 3.4);
##   * the intermediate *_sub / all.* data frames are built inside the helper
##     instead of being left in the global environment.
## All *_density plot objects used by ggarrange() below keep their names.
## ---------------------------------------------------------------------------

## Build one CABO-vs-TRY density panel.
##   cabo_col:  name of the trait column in trait.df
##   xlab:      x-axis label (string or plotmath expression)
##   try_ids / try_values: TRY observation IDs and (already unit-converted)
##              trait values; leave NULL when TRY has no data for the trait
##   legend_pos: normalized legend coordinates inside the panel
##   show_y_title: FALSE blanks the y-axis title (panels not in column 1)
##   transparent_legend: TRUE removes the legend's background box
##   xlim:      optional x-axis zoom via coord_cartesian() (densities are
##              estimated on the full data, only the view is truncated)
trait_density_plot <- function(cabo_col, xlab,
                               try_ids = NULL, try_values = NULL,
                               legend_pos = c(0.7, 0.7),
                               show_y_title = TRUE,
                               transparent_legend = FALSE,
                               xlim = NULL) {
  plot.df <- data.frame(sample_id = trait.df$sample_id,
                        value = trait.df[[cabo_col]],
                        cat = "CABO")
  legend_labels <- paste0("CABO (n=", sum(!is.na(trait.df[[cabo_col]])), ")")
  if (!is.null(try_values)) {
    plot.df <- rbind(plot.df,
                     data.frame(sample_id = try_ids,
                                value = try_values,
                                cat = "TRY"))
    legend_labels <- c(legend_labels,
                       paste0("TRY (n=", sum(!is.na(try_values)), ")"))
  }
  panel_theme <- theme(legend.title = element_blank(),
                       panel.grid.major = element_blank(),
                       panel.grid.minor = element_blank(),
                       text = element_text(size = 30),
                       legend.position = legend_pos,
                       axis.text.y = element_blank(),
                       axis.ticks.y = element_blank())
  if (!show_y_title) {
    panel_theme <- panel_theme + theme(axis.title.y = element_blank())
  }
  if (transparent_legend) {
    panel_theme <- panel_theme +
      theme(legend.background = element_rect(fill = "transparent"))
  }
  ## bounds=c(0,Inf): traits are non-negative, keep density mass above zero
  p <- ggplot(data = plot.df, aes(x = value, color = cat)) +
    geom_density(linewidth = 1.5, bounds = c(0, Inf)) +
    theme_bw() +
    panel_theme +
    labs(y = "Density", x = xlab) +
    scale_color_discrete(labels = legend_labels)
  if (!is.null(xlim)) p <- p + coord_cartesian(xlim = xlim)
  p
}

## -- structural traits -------------------------------------------------------
LMA_density <- trait_density_plot("LMA", expression("LMA (kg m"^-2*")"),
                                  TRY_SLA$ObsDataID, TRY_SLA$LMA)
## EWT has no TRY counterpart, so this panel shows CABO only
EWT_density <- trait_density_plot("EWT", "EWT (mm)", show_y_title = FALSE)
LDMC_density <- trait_density_plot("LDMC", expression("LDMC (mg g"^-1*")"),
                                   TRY_LDMC$ObsDataID,
                                   TRY_LDMC$StdValue * 1000,  # g/g -> mg/g
                                   legend_pos = c(0.75, 0.7),
                                   show_y_title = FALSE)
## -- elemental / carbon-fraction traits (TRY mg/g -> %) ----------------------
Nmass_density <- trait_density_plot("Nmass", expression("N (%)"),
                                    TRY_N$ObsDataID, TRY_N$StdValue / 10)
Cmass_density <- trait_density_plot("Cmass", expression("C (%)"),
                                    TRY_C$ObsDataID, TRY_C$StdValue / 10,
                                    legend_pos = c(0.25, 0.7),
                                    show_y_title = FALSE)
solubles_mass_density <- trait_density_plot("solubles_mass",
                                            expression("Solubles (%)"),
                                            TRY_solubles$ObsDataID,
                                            TRY_solubles$StdValue / 10,
                                            legend_pos = c(0.25, 0.92),
                                            show_y_title = FALSE,
                                            transparent_legend = TRUE)
hemicellulose_mass_density <- trait_density_plot("hemicellulose_mass",
                                                 expression("Hemicellulose (%)"),
                                                 TRY_hemicellulose$ObsDataID,
                                                 TRY_hemicellulose$StdValue / 10,
                                                 legend_pos = c(0.7, 0.8))
cellulose_mass_density <- trait_density_plot("cellulose_mass",
                                             expression("Cellulose (%)"),
                                             TRY_cellulose$ObsDataID,
                                             TRY_cellulose$StdValue / 10,
                                             legend_pos = c(0.7, 0.8),
                                             show_y_title = FALSE)
lignin_mass_density <- trait_density_plot("lignin_mass",
                                          expression("Lignin (%)"),
                                          TRY_lignin$ObsDataID,
                                          TRY_lignin$StdValue / 10,
                                          legend_pos = c(0.7, 0.8),
                                          show_y_title = FALSE)
## -- pigments ----------------------------------------------------------------
## total chlorophyll = a + b (NA whenever either component is missing)
trait.df$chl_mass <- trait.df$chlA_mass + trait.df$chlB_mass
chl_mass_density <- trait_density_plot("chl_mass",
                                       expression("Total Chl (mg g"^-1*")"),
                                       TRY_Chl$ObsDataID, TRY_Chl$StdValue)
chlA_mass_density <- trait_density_plot("chlA_mass",
                                        expression(paste("Chl ", italic("a"), " (mg g"^-1*")")),
                                        TRY_ChlA$ObsDataID, TRY_ChlA$StdValue)
chlB_mass_density <- trait_density_plot("chlB_mass",
                                        expression(paste("Chl ", italic("b"), " (mg g"^-1*")")),
                                        TRY_ChlB$ObsDataID, TRY_ChlB$StdValue)
car_mass_density <- trait_density_plot("car_mass",
                                       expression("Carotenoids (mg g"^-1*")"),
                                       TRY_Car$ObsDataID, TRY_Car$StdValue,
                                       show_y_title = FALSE)
## -- mineral nutrients (mg/g) ------------------------------------------------
## Al and Na get an x-axis zoom because a few extreme TRY values would
## otherwise flatten the rest of the distribution
Al_mass_density <- trait_density_plot("Al_mass",
                                      expression(paste("Al (mg g"^-1*")")),
                                      TRY_Al$ObsDataID, TRY_Al$StdValue,
                                      show_y_title = FALSE, xlim = c(0, 1))
Ca_mass_density <- trait_density_plot("Ca_mass",
                                      expression(paste("Ca (mg g"^-1*")")),
                                      TRY_Ca$ObsDataID, TRY_Ca$StdValue)
Cu_mass_density <- trait_density_plot("Cu_mass",
                                      expression(paste("Cu (mg g"^-1*")")),
                                      TRY_Cu$ObsDataID, TRY_Cu$StdValue,
                                      show_y_title = FALSE)
Fe_mass_density <- trait_density_plot("Fe_mass",
                                      expression(paste("Fe (mg g"^-1*")")),
                                      TRY_Fe$ObsDataID, TRY_Fe$StdValue,
                                      show_y_title = FALSE)
K_mass_density <- trait_density_plot("K_mass",
                                     expression(paste("K (mg g"^-1*")")),
                                     TRY_K$ObsDataID, TRY_K$StdValue)
Mg_mass_density <- trait_density_plot("Mg_mass",
                                      expression(paste("Mg (mg g"^-1*")")),
                                      TRY_Mg$ObsDataID, TRY_Mg$StdValue,
                                      show_y_title = FALSE)
Mn_mass_density <- trait_density_plot("Mn_mass",
                                      expression(paste("Mn (mg g"^-1*")")),
                                      TRY_Mn$ObsDataID, TRY_Mn$StdValue,
                                      show_y_title = FALSE)
Na_mass_density <- trait_density_plot("Na_mass",
                                      expression(paste("Na (mg g"^-1*")")),
                                      TRY_Na$ObsDataID, TRY_Na$StdValue,
                                      xlim = c(0, 15))
P_mass_density <- trait_density_plot("P_mass",
                                     expression(paste("P (mg g"^-1*")")),
                                     TRY_P$ObsDataID, TRY_P$StdValue,
                                     show_y_title = FALSE)
Zn_mass_density <- trait_density_plot("Zn_mass",
                                      expression(paste("Zn (mg g"^-1*")")),
                                      TRY_Zn$ObsDataID, TRY_Zn$StdValue,
                                      show_y_title = FALSE)
## Write all 21 density panels to a single 3x7 PDF page.
## NOTE(review): ggarrange() is presumably from ggpubr (loaded elsewhere in
## this script) -- confirm; chlA/chlB panels are deliberately omitted in
## favor of the total-chlorophyll panel.
pdf("Images/density_plot.pdf",width=21,height=30)
ggarrange(plotlist = list(LMA_density,EWT_density,LDMC_density,
                          Nmass_density,Cmass_density,solubles_mass_density,
                          hemicellulose_mass_density,cellulose_mass_density,lignin_mass_density,
                          chl_mass_density,car_mass_density,Al_mass_density,
                          Ca_mass_density,Cu_mass_density,Fe_mass_density,
                          K_mass_density,Mg_mass_density,Mn_mass_density,
                          Na_mass_density,P_mass_density,Zn_mass_density),
             ncol = 3,nrow=7)
dev.off()
############################
## variance partitioning
## Partition each trait's variance into taxonomic (species within genus
## within family), site, and residual components with nested random-effects
## models; the loop replaces seven near-identical lmer()/VarCorr() calls.
## The trait order below matters: gather() later selects LMA:lignin_mass
## by column position.
varpart_traits <- c("LMA","LDMC","Nmass","EWT","chlA_mass",
                    "hemicellulose_mass","lignin_mass")
variances <- data.frame(effect=c("species","genus","site","family","residual"))
varpart_fits <- list()
for(tr in varpart_traits){
  ## species nested in genus nested in family, plus a crossed site effect
  varpart_fits[[tr]] <- lmer(reformulate("1+(1|family/genus/species)+(1|site)",
                                         response=tr),
                             data=trait.df)
  ## NOTE(review): the hard-coded 'effect' labels assume VarCorr() returns
  ## components in this order (lme4 sorts random effects by decreasing
  ## number of levels) -- verify with as.data.frame(VarCorr(...))$grp
  ## before trusting the legend.
  variances[[tr]] <- as.data.frame(VarCorr(varpart_fits[[tr]]))$vcov
}
variances_long<-gather(variances, trait, variance, LMA:lignin_mass, factor_key=TRUE)
## stacked bars: proportion of variance explained by each component per trait
ggplot(variances_long, aes(x = trait, y = variance,fill=effect)) +
  geom_bar(position = "fill", stat="identity")+
  scale_y_continuous(labels = scales::percent_format())
|
\name{comparisonsTable}
\docType{methods}
\alias{comparisonsTable}
\title{
Create a Table of Comparisons amongst Groups
}
\description{
Create a table of comparisons based on a fit by the \pkg{cg}
package.
}
\usage{
comparisonsTable(fit, type = "pairwisereflect", alpha = 0.05, addpct = FALSE,
display = "print", \dots)
}
\arguments{
\item{fit }{
A fit object created with a \code{\link{fit}} method from the \pkg{cg}
package. See specific methods.
}
\item{type }{Can be one of four values:
\describe{
\item{\code{"pairwisereflect"}}{
The default value; It calculates and lists all possible
pairwise comparison permutations, as each pair order is
included. In other words, Groups A vs. B and B vs. A will be
included.
}
\item{\code{"pairwise"}}{Calculates and lists all possible
pairwise comparison combinations. Once a pair such as Groups
A vs. B is specified, then the reflective B vs. A is not
included. So the number of comparisons is half that produced
by \code{"pairwisereflect"}. The ordering of group levels in the
\code{fit} object is used to determine which
ordering is included and which is not. If all orderings are
of interest, such as for \code{settings$endptscale=="log"} in the \code{fit}
object, use the \code{"pairwisereflect"} value above.
}
\item{\code{"allgroupstocontrol"}}{Takes the value of
\code{settings$refgrp} in the \pkg{cg} \code{fit}
object, deems it the "control" group, and constructs
pairwise comparisons of all other groups to it.
}
\item{\code{"custom"}}{Indicates that a custom matrix of
comparisons will be constructed, and that matrix needs to be
specified in the \code{contrastmatrix} argument of a method.
}
}
}
\item{alpha }{Significance level, by default set to \code{0.05}.
}
\item{addpct }{Only relevant if \code{settings$endptscale=="original"} in the
    fit object. A column of percent differences is added for the
comparisons, as a descriptive supplement to the original scale
differences that are formally estimated.
}
\item{display }{One of three valid values:
\describe{
\item{\code{"print"}}{
The default value, it calls a \code{print} method for the
created \code{ComparisonsTable} object, which is
a formatted text output of the table(s).
}
\item{\code{"none"}}{
      Suppresses any printing. Useful, for example, when just assignment
of the resulting object is desired.
}
\item{\code{"show"}}{
Calls the default \code{\link{showDefault}} method, which
will just print out the \code{ComparisonsTable} object components.
}
}
}
\item{\dots }{
Additional arguments, depending on the specific method written for
the object. See the method-specific documentation for additional
details.
}
}
\value{
A method-specific \code{comparisonsTable} object is returned.
See the specific methods for discussion of return values.
}
\author{
Bill Pikounis [aut, cre, cph], John Oleynick [aut], Eva Ye [ctb]
}
\note{
Contact \email{cg@billpikounis.net} for bug reports, questions,
concerns, and comments.
}
\seealso{
\code{\link{comparisonsTable.cgOneFactorFit}},
\code{\link{comparisonsTable.cgPairedDifferenceFit}}.
}
\examples{
#### One Factor data
data(canine)
canine.data <- prepareCGOneFactorData(canine, format="groupcolumns",
analysisname="Canine",
endptname="Prostate Volume",
endptunits=expression(plain(cm)^3),
digits=1, logscale=TRUE, refgrp="CC")
canine.fit <- fit(canine.data)
canine.comps0 <- comparisonsTable(canine.fit)
canine.comps1 <- comparisonsTable(canine.fit, mcadjust=TRUE,
type="allgroupstocontrol", refgrp="CC")
data(gmcsfcens)
gmcsfcens.data <- prepareCGOneFactorData(gmcsfcens, format="groupcolumns",
analysisname="cytokine",
endptname="GM-CSF (pg/ml)",
logscale=TRUE)
gmcsfcens.fit <- fit(gmcsfcens.data, type="aft")
gmcsfcens.comps <- comparisonsTable(gmcsfcens.fit)
## Paired Difference data
data(anorexiaFT)
anorexiaFT.data <- prepareCGPairedDifferenceData(anorexiaFT, format="groupcolumns",
analysisname="Anorexia FT",
endptname="Weight",
endptunits="lbs",
expunitname="Patient",
digits=1,
logscale=TRUE)
anorexiaFT.fit <- fit(anorexiaFT.data)
comparisonsTable(anorexiaFT.fit)
}
\concept{comparisons}
\concept{multiplicity}
|
/man/comparisonsTable.Rd
|
no_license
|
cran/cg
|
R
| false
| false
| 5,054
|
rd
|
\name{comparisonsTable}
\docType{methods}
\alias{comparisonsTable}
\title{
Create a Table of Comparisons amongst Groups
}
\description{
Create a table of comparisons based on a fit by the \pkg{cg}
package.
}
\usage{
comparisonsTable(fit, type = "pairwisereflect", alpha = 0.05, addpct = FALSE,
display = "print", \dots)
}
\arguments{
\item{fit }{
A fit object created with a \code{\link{fit}} method from the \pkg{cg}
package. See specific methods.
}
\item{type }{Can be one of four values:
\describe{
\item{\code{"pairwisereflect"}}{
The default value; It calculates and lists all possible
pairwise comparison permutations, as each pair order is
included. In other words, Groups A vs. B and B vs. A will be
included.
}
\item{\code{"pairwise"}}{Calculates and lists all possible
pairwise comparison combinations. Once a pair such as Groups
A vs. B is specified, then the reflective B vs. A is not
included. So the number of comparisons is half that produced
by \code{"pairwisereflect"}. The ordering of group levels in the
\code{fit} object is used to determine which
ordering is included and which is not. If all orderings are
of interest, such as for \code{settings$endptscale=="log"} in the \code{fit}
object, use the \code{"pairwisereflect"} value above.
}
\item{\code{"allgroupstocontrol"}}{Takes the value of
\code{settings$refgrp} in the \pkg{cg} \code{fit}
object, deems it the "control" group, and constructs
pairwise comparisons of all other groups to it.
}
\item{\code{"custom"}}{Indicates that a custom matrix of
comparisons will be constructed, and that matrix needs to be
specified in the \code{contrastmatrix} argument of a method.
}
}
}
\item{alpha }{Significance level, by default set to \code{0.05}.
}
\item{addpct }{Only relevant if \code{settings$endptscale=="original"} in the
    fit object. A column of percent differences is added for the
comparisons, as a descriptive supplement to the original scale
differences that are formally estimated.
}
\item{display }{One of three valid values:
\describe{
\item{\code{"print"}}{
The default value, it calls a \code{print} method for the
created \code{ComparisonsTable} object, which is
a formatted text output of the table(s).
}
\item{\code{"none"}}{
      Suppresses any printing. Useful, for example, when just assignment
of the resulting object is desired.
}
\item{\code{"show"}}{
Calls the default \code{\link{showDefault}} method, which
will just print out the \code{ComparisonsTable} object components.
}
}
}
\item{\dots }{
Additional arguments, depending on the specific method written for
the object. See the method-specific documentation for additional
details.
}
}
\value{
A method-specific \code{comparisonsTable} object is returned.
See the specific methods for discussion of return values.
}
\author{
Bill Pikounis [aut, cre, cph], John Oleynick [aut], Eva Ye [ctb]
}
\note{
Contact \email{cg@billpikounis.net} for bug reports, questions,
concerns, and comments.
}
\seealso{
\code{\link{comparisonsTable.cgOneFactorFit}},
\code{\link{comparisonsTable.cgPairedDifferenceFit}}.
}
\examples{
#### One Factor data
data(canine)
canine.data <- prepareCGOneFactorData(canine, format="groupcolumns",
analysisname="Canine",
endptname="Prostate Volume",
endptunits=expression(plain(cm)^3),
digits=1, logscale=TRUE, refgrp="CC")
canine.fit <- fit(canine.data)
canine.comps0 <- comparisonsTable(canine.fit)
canine.comps1 <- comparisonsTable(canine.fit, mcadjust=TRUE,
type="allgroupstocontrol", refgrp="CC")
data(gmcsfcens)
gmcsfcens.data <- prepareCGOneFactorData(gmcsfcens, format="groupcolumns",
analysisname="cytokine",
endptname="GM-CSF (pg/ml)",
logscale=TRUE)
gmcsfcens.fit <- fit(gmcsfcens.data, type="aft")
gmcsfcens.comps <- comparisonsTable(gmcsfcens.fit)
## Paired Difference data
data(anorexiaFT)
anorexiaFT.data <- prepareCGPairedDifferenceData(anorexiaFT, format="groupcolumns",
analysisname="Anorexia FT",
endptname="Weight",
endptunits="lbs",
expunitname="Patient",
digits=1,
logscale=TRUE)
anorexiaFT.fit <- fit(anorexiaFT.data)
comparisonsTable(anorexiaFT.fit)
}
\concept{comparisons}
\concept{multiplicity}
|
## Convert the Neves et al. S. cantharus ageing workbook into the two raw
## precision-analysis CSVs: between-reader reads (B) and within-reader
## repeated reads (W). Total length is converted from cm to mm (tl*10).
cat("\014"); rm(list=ls())  # clear console and workspace (interactive script)
setwd(here::here())  # anchor paths at the project root
library(readxl)
library(dplyr)
pth <- "data/raw_ageing/aaaOriginals/"
dfB <- read_excel(paste0(pth,"S_cantharus_data_Neves_etal.xlsx"),
                  sheet="Between readers") %>%
  rename(tl=`TL (cm)`,sex=Sex,otoliths_R1=`Reader 1`,otoliths_R2=`Reader 2`) %>%
  mutate(tl=tl*10)
str(dfB)
write.csv(dfB,file="data/raw_ageing/neves_modelling_2017_B.csv",
          quote=FALSE,row.names=FALSE)
## NOTE(review): `2st read` and `3nd read` look like typos but must match
## the spreadsheet's actual column headers exactly -- do not "fix" them
## without checking the workbook.
dfW <- read_excel(paste0(pth,"S_cantharus_data_Neves_etal.xlsx"),
                  sheet="Between reads") %>%
  rename(tl=`TL (cm)`,sex=Sex,otoliths_R2=`2st read`,otoliths_R3=`3nd read`) %>%
  mutate(tl=tl*10)
str(dfW)
write.csv(dfW,file="data/raw_ageing/neves_modelling_2017_W.csv",
          quote=FALSE,row.names=FALSE)
|
/data/raw_ageing/aaaOriginals/S_cantharus_data_Neves_etal_MakeRawData.R
|
no_license
|
droglenc/AgePrecision
|
R
| false
| false
| 771
|
r
|
## Build the two raw ageing-precision data sets (between-reader and
## within-reader otolith reads) from the Neves et al. S. cantharus workbook,
## converting total length from cm to mm along the way.
cat("\014"); rm(list=ls())
setwd(here::here())
library(readxl)
library(dplyr)
src_xlsx <- paste0("data/raw_ageing/aaaOriginals/",
                   "S_cantharus_data_Neves_etal.xlsx")
## Between-reader sheet: one age estimate per reader
df_between <- read_excel(src_xlsx, sheet="Between readers") %>%
  rename(tl=`TL (cm)`,sex=Sex,otoliths_R1=`Reader 1`,otoliths_R2=`Reader 2`) %>%
  mutate(tl=tl*10)
str(df_between)
write.csv(df_between,file="data/raw_ageing/neves_modelling_2017_B.csv",
          quote=FALSE,row.names=FALSE)
## Within-reader sheet: repeated reads by the same reader (the odd column
## names `2st read` / `3nd read` match the workbook's headers verbatim)
df_within <- read_excel(src_xlsx, sheet="Between reads") %>%
  rename(tl=`TL (cm)`,sex=Sex,otoliths_R2=`2st read`,otoliths_R3=`3nd read`) %>%
  mutate(tl=tl*10)
str(df_within)
write.csv(df_within,file="data/raw_ageing/neves_modelling_2017_W.csv",
          quote=FALSE,row.names=FALSE)
|
## Density (PDF) and empirical CDF comparisons of issue resolution times for
## Gitter-linked vs. randomly sampled issues.
library(lattice)
both<-read.csv("Gitter_Random.csv",header=TRUE)
## overlaid density curves, one per configuration (Gitter vs. Random)
pdf("PDFFinal.pdf",height = 6, width=6)
densityplot(~resolution_time,data=both,groups = configuration,
            plot.points = FALSE, ref = TRUE,
            auto.key=list(space="top", columns=2),xlab = "Resolution Time in Hours",ylim=c(-0.00001,0.00065), region=TRUE)
dev.off()
#, lty=c(2,1)
library(lattice)
library(latticeExtra)
gitter<-read.csv("ResolutionTime_GitterIssues.csv",header=TRUE)
random<-read.csv("ResolutionTime-RandomIssues.csv",header=TRUE)
## truncate both samples to a common length so the ECDFs are comparable
gitter_res_time<-gitter$resolution_time[1:8920]
random_res_time<-random$resolution_time[1:8920]
df <- data.frame(gitter_res_time,random_res_time)
Gitter<-gitter_res_time
## BUG FIX: was random$res_time -- no such column exists (the column is
## resolution_time, and data.frame $ partial matching only matches name
## prefixes), so Random was NULL and the random sample was silently dropped
## from the ECDF plots. Use the truncated vector computed above instead.
Random<-random_res_time
## full ECDF
pdf("CDFFull.pdf",height = 5, width=6)
ecdfplot(~ Gitter + Random, data=df , auto.key =list(space="top", columns=2,text=c("Gitter-issues","Random-issues")),
         xlab="Resolution Time in Hours") #//CDFFULL.pdf
dev.off()
## zoom on the upper tail (top 5%)
pdf("CDF.pdf",height = 5, width=6)
ecdfplot(~ Gitter + Random, data=df , auto.key =list(space="top", columns=2,text=c("Gitter-issues","Random-issues")),
         xlab="Resolution Time in Hours",ylim=c(0.95,1.001)) #//CDF
dev.off()
|
/Scripts_Graphs/PDFandCDF.R
|
no_license
|
Hareem-E-Sahar/gitter
|
R
| false
| false
| 1,300
|
r
|
## Density (PDF) and empirical CDF comparisons of issue resolution times for
## Gitter-linked vs. randomly sampled issues.
library(lattice)
both<-read.csv("Gitter_Random.csv",header=TRUE)
## overlaid density curves, one per configuration (Gitter vs. Random)
pdf("PDFFinal.pdf",height = 6, width=6)
densityplot(~resolution_time,data=both,groups = configuration,
            plot.points = FALSE, ref = TRUE,
            auto.key=list(space="top", columns=2),xlab = "Resolution Time in Hours",ylim=c(-0.00001,0.00065), region=TRUE)
dev.off()
#, lty=c(2,1)
library(lattice)
library(latticeExtra)
gitter<-read.csv("ResolutionTime_GitterIssues.csv",header=TRUE)
random<-read.csv("ResolutionTime-RandomIssues.csv",header=TRUE)
## truncate both samples to a common length so the ECDFs are comparable
gitter_res_time<-gitter$resolution_time[1:8920]
random_res_time<-random$resolution_time[1:8920]
df <- data.frame(gitter_res_time,random_res_time)
Gitter<-gitter_res_time
## BUG FIX: was random$res_time -- no such column exists (the column is
## resolution_time, and data.frame $ partial matching only matches name
## prefixes), so Random was NULL and the random sample was silently dropped
## from the ECDF plots. Use the truncated vector computed above instead.
Random<-random_res_time
## full ECDF
pdf("CDFFull.pdf",height = 5, width=6)
ecdfplot(~ Gitter + Random, data=df , auto.key =list(space="top", columns=2,text=c("Gitter-issues","Random-issues")),
         xlab="Resolution Time in Hours") #//CDFFULL.pdf
dev.off()
## zoom on the upper tail (top 5%)
pdf("CDF.pdf",height = 5, width=6)
ecdfplot(~ Gitter + Random, data=df , auto.key =list(space="top", columns=2,text=c("Gitter-issues","Random-issues")),
         xlab="Resolution Time in Hours",ylim=c(0.95,1.001)) #//CDF
dev.off()
|
# SDC Example Circles
# For a series of circle radii, build the circle polygon at the origin, apply
# the project's decay() buffering, compute the stand-decay coefficient (SDC)
# for each, and save a 3-column grid of annotated per-circle plots.
# NOTE(review): depends on sp (SpatialPoints, CRS), rgeos (gBuffer), ggplot2,
# magrittr's %>%, and the project helpers decay()/calculate.sdc() being loaded
# by an earlier script — confirm before running standalone.
library(gridExtra)
# Radii (metres, per the Albers CRS below) to evaluate; `sdc` is filled in
# inside the loop.
c.df <- data.frame(c.rad=c(20,40,60,80,100,150,200,250,300,400,500,1000),
sdc=NA)
# Parallel accumulators: radius values, circle polygons, per-circle plots.
c.polys=c.rad=c.plots=list()
for(r in 1:nrow(c.df)){
c.rad[[r]] <- c.df[r,"c.rad"]
# Single point at the origin in NAD83 / California Albers.
coords_sp <- SpatialPoints(data.frame(x=0,y=0),CRS("+proj=aea +lat_1=34 +lat_2=40.5 +lat_0=0 +lon_0=-120 +x_0=0 +y_0=-4000000 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")) #CRS EPSG:3310, NAD83 CA Albers
# Buffer the point into a circle of the current radius.
c.poly <- gBuffer(coords_sp,width=c.rad[[r]],quadsegs = 20)
# Apply decay buffers (10 m increments up to 1000 m), then record the
# (unique) SDC value computed for this circle.
c.poly.decay <- decay(c.poly,buf_inc=10,buf_max=1000,name=as.character(c.rad[[r]]))
c.df[r,"sdc"] <- calculate.sdc(c.poly.decay)[,"sdc.name"] %>% unique()
c.polys[[r]] <- c.poly
# Plot: circle outline, a radius segment, annotations for radius, area
# (0.0001 converts m^2 to ha), and the resulting SDC / ln(SDC) in the title.
c.plots[[r]] <-
ggplot()+
geom_path(data=c.poly,aes(x=long,y=lat))+
geom_segment(aes_string(x=0,y=0,xend=c.df[r,"c.rad"],yend=0))+
annotate("text",x=-1000,y=900,label=
paste("r =",c.rad[[r]]),hjust=0,vjust=0,size=3)+
annotate("text",x=-1000,y=-1000,label=
paste("a =\n",round(0.0001*pi*c.rad[[r]]^2,2), "ha"),hjust=0,vjust=0,size=3)+
lims(x=c(-1000,1000),y=c(-1000,1000))+
labs(title=paste0("sdc = ",c.df[r,"sdc"],
"\n ln(sdc) = ", round(log(as.double(c.df[r,"sdc"])),3)))+
theme_bw()+
theme(plot.title = element_text(hjust = 0.5))
}
# Assemble all per-circle plots into a 3-column grid and save as PNG.
png(file = paste0("./Manuscripts/MS2- Trends/Figures/Fig1_",Sys.Date(),".png"),width=8,height=12,units="in",res=200)
do.call("grid.arrange", c(c.plots, ncol=3))
dev.off()
|
/Code/MS2- Trends Code/3. SDC_Example_Circles.R
|
no_license
|
stevensjt/Mixed_Severity
|
R
| false
| false
| 1,491
|
r
|
#SDC Example Circles
library(gridExtra)
c.df <- data.frame(c.rad=c(20,40,60,80,100,150,200,250,300,400,500,1000),
sdc=NA)
c.polys=c.rad=c.plots=list()
for(r in 1:nrow(c.df)){
c.rad[[r]] <- c.df[r,"c.rad"]
coords_sp <- SpatialPoints(data.frame(x=0,y=0),CRS("+proj=aea +lat_1=34 +lat_2=40.5 +lat_0=0 +lon_0=-120 +x_0=0 +y_0=-4000000 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")) #CRS EPSG:3310, NAD83 CA Albers
c.poly <- gBuffer(coords_sp,width=c.rad[[r]],quadsegs = 20)
c.poly.decay <- decay(c.poly,buf_inc=10,buf_max=1000,name=as.character(c.rad[[r]]))
c.df[r,"sdc"] <- calculate.sdc(c.poly.decay)[,"sdc.name"] %>% unique()
c.polys[[r]] <- c.poly
c.plots[[r]] <-
ggplot()+
geom_path(data=c.poly,aes(x=long,y=lat))+
geom_segment(aes_string(x=0,y=0,xend=c.df[r,"c.rad"],yend=0))+
annotate("text",x=-1000,y=900,label=
paste("r =",c.rad[[r]]),hjust=0,vjust=0,size=3)+
annotate("text",x=-1000,y=-1000,label=
paste("a =\n",round(0.0001*pi*c.rad[[r]]^2,2), "ha"),hjust=0,vjust=0,size=3)+
lims(x=c(-1000,1000),y=c(-1000,1000))+
labs(title=paste0("sdc = ",c.df[r,"sdc"],
"\n ln(sdc) = ", round(log(as.double(c.df[r,"sdc"])),3)))+
theme_bw()+
theme(plot.title = element_text(hjust = 0.5))
}
png(file = paste0("./Manuscripts/MS2- Trends/Figures/Fig1_",Sys.Date(),".png"),width=8,height=12,units="in",res=200)
do.call("grid.arrange", c(c.plots, ncol=3))
dev.off()
|
## Soil color model fitting
# Ranger (random forest) models of soil colour channel B for two depths
# (surface and subsoil), trained on environmental covariates.
# Predictand: B
# Covariates: columns 9, 10 and 12:50 of the calibration data
library(ranger);library(caret)

root <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/data/processed_1/"
capture.root <- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/models/model_fitting/model_12/"

# Fit one ranger model of B on the covariates in `dat`, save it to
# `model_path`, and append summary / model print / variable importance to the
# text log at `log_path`. Tuning is fixed (mtry = 41, variance splitrule,
# min.node.size = 5) with OOB resampling — identical to the two copy-pasted
# per-depth blocks this helper replaces.
fit_b_model <- function(dat, model_path, log_path) {
  tgrid <- expand.grid(
    .mtry = 41,
    .splitrule = "variance",
    .min.node.size = 5)
  print(names(dat))  # echo covariate names, as the original script did
  fit <- train(x = dat[, c(9, 10, 12:50)], y = dat$B,
               tuneGrid = tgrid,
               method = "ranger",
               trControl = trainControl(method = "oob"),
               num.trees = 500,
               importance = 'impurity')
  # Save the fitted model.
  saveRDS(fit, file = model_path)
  # Echo to console and append the same output to the log file.
  for (obj in list(summary(fit), fit, varImp(fit, scale = FALSE))) {
    print(obj)
    out <- capture.output(obj)
    cat(paste0(out, collapse = "\r\n"), file = log_path, sep = ",", append = TRUE)
  }
  invisible(fit)
}

#### SURFACE MODEL
top.data <- readRDS(paste0(root, "tern_soilcolor_siteDat_covariates_surface_calset.rds"))
fit_b_model(top.data,
            model_path = paste0(capture.root, "surface_model_12.rds"),
            log_path   = paste0(capture.root, "rangerModel_params_surface_model_12.txt"))

#### SUBSOIL MODEL
sub.data <- readRDS(paste0(root, "tern_soilcolor_siteDat_covariates_subsoil_calset.rds"))
fit_b_model(sub.data,
            model_path = paste0(capture.root, "subsoil_model_12.rds"),
            log_path   = paste0(capture.root, "rangerModel_params_subsoil_model_12.txt"))
#END
|
/Production/DSM/SoilColour/digitalsoilmapping/models/model_fitting/model_12/model_12_both.R
|
permissive
|
AusSoilsDSM/SLGA
|
R
| false
| false
| 2,841
|
r
|
## Soil color model fitting
# Ranger models of L a b for 2 depths
# Predictand: L
# Covariates: Environs
library(ranger);library(caret)
root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/data/processed_1/"
capture.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/models/model_fitting/model_12/"
var_nm1<- paste0(capture.root,"rangerModel_params_surface_model_12.txt")
var_nm2<- paste0(capture.root,"rangerModel_params_subsoil_model_12.txt")
mod.out.sur<- paste0(capture.root,"surface_model_12.rds")
mod.out.sub<- paste0(capture.root,"subsoil_model_12.rds")
top.data<- readRDS(paste0(root,"tern_soilcolor_siteDat_covariates_surface_calset.rds"))
#### SURFACE MODELS
# B [continuous models]
# model tuning parameters
tgrid <- expand.grid(
.mtry = 41,
.splitrule= "variance",
.min.node.size = 5)
names(top.data)
ranger.model1<-train(x= top.data[,c(9,10,12:50)], y= top.data$B,
tuneGrid = tgrid,
method = "ranger",
trControl =trainControl(method = "oob"),
num.trees = 500,
importance = 'impurity')
#save file
saveRDS(ranger.model1, file = mod.out.sur )
summary(ranger.model1)
ranger.model1
varImp(ranger.model1, scale=FALSE)
## capture output
out1<- capture.output(summary(ranger.model1))
out1<- paste0(out1,collapse = "\r\n")
cat(out1, file = var_nm1, sep=",", append = T)
out2<- capture.output(ranger.model1)
out2<- paste0(out2,collapse = "\r\n")
cat(out2, file = var_nm1, sep=",", append = T)
out3<- capture.output(varImp(ranger.model1, scale=FALSE))
out3<- paste0(out3,collapse = "\r\n")
cat(out3, file = var_nm1, sep=",", append = T)
sub.data<- readRDS(paste0(root,"tern_soilcolor_siteDat_covariates_subsoil_calset.rds"))
#### SUBSOIL MODELS
# B [continuous variable]
# model tuning parameters
tgrid <- expand.grid(
.mtry = 41,
.splitrule= "variance",
.min.node.size = 5)
names(sub.data)
ranger.model1<-train(x= sub.data[,c(9,10,12:50)], y= sub.data$B,
tuneGrid = tgrid,
method = "ranger",
trControl =trainControl(method = "oob"),
num.trees = 500,
importance = 'impurity')
#save file
saveRDS(ranger.model1, file = mod.out.sub)
summary(ranger.model1)
ranger.model1
varImp(ranger.model1, scale=FALSE)
## capture output
out1<- capture.output(summary(ranger.model1))
out1<- paste0(out1,collapse = "\r\n")
cat(out1, file = var_nm2, sep=",", append = T)
out2<- capture.output(ranger.model1)
out2<- paste0(out2,collapse = "\r\n")
cat(out2, file = var_nm2, sep=",", append = T)
out3<- capture.output(varImp(ranger.model1, scale=FALSE))
out3<- paste0(out3,collapse = "\r\n")
cat(out3, file = var_nm2, sep=",", append = T)
#END
|
# This script prepares data for valuation
# 1. function for salary scale
# - add_salary_full
# 2. function for distribution of new entrants
# add_entrantsDist
# 3. function adjusting retirement rates for modeling
#*******************************************************************************
# Adjustments to tier parameters ####
#*******************************************************************************
# Apply valuation-level overrides to a tier's parameter list, keyed off the
# tier name: tiers matching "t1" may have assumed COLA and FAS years
# overridden; tiers matching "t2" may have assumed COLA overridden. An
# override is applied only when its value in val_paramlist_ is not NA.
# Returns the (possibly modified) tierData.
adj_tierParams <- function(tierData,
                           val_paramlist_ = val_paramlist,
                           Global_paramlist_ = Global_paramlist){

  # Bring valuation parameters into the local environment.
  assign_parmsList(val_paramlist_, envir = environment())

  tier_nm <- tierData$tier_name

  # Tier 1 overrides: assumed COLA and final-average-salary years.
  if (str_detect(tier_nm, "t1")) {
    cola_t1 <- val_paramlist_$cola_assumed_override_t1
    fas_t1  <- val_paramlist_$fasyears_override_t1
    if (!is.na(cola_t1)) tierData$tier_params$cola_assumed <- cola_t1
    if (!is.na(fas_t1))  tierData$tier_params$fasyears     <- fas_t1
  }

  # Tier 2 override: assumed COLA only.
  if (str_detect(tier_nm, "t2")) {
    cola_t2 <- val_paramlist_$cola_assumed_override_t2
    if (!is.na(cola_t2)) tierData$tier_params$cola_assumed <- cola_t2
  }

  tierData
}
#*******************************************************************************
# Constructing full salary schedule #####
#*******************************************************************************
# Construct the complete salary schedule for a tier.
#
# Builds tierData$salary_full: salary (sx) by entry year (start_year), entry
# age (ea), age and calendar year, covering every cohort relevant to the
# simulation, including cohorts that entered before the first simulation year.
#
# tierData : tier data list; needs $df_salScale (yos, salScale) and
#            $df_n_actives (ea, yos, salary).
# The two paramlist arguments are unpacked into the local environment by
# assign_parmsList and supply min_age, max_age, max_retAge, nyear, init_year,
# range_ea, startingSalgrowth, etc.
# Returns tierData with $salary_full added.
#
# NOTE(review): splong() (spline interpolation) and na2zero() are project
# helpers defined elsewhere. salary_full is left grouped by (start_year, ea)
# — confirm downstream code does not require an ungrouped frame.
add_salary_full <- function(tierData,
Global_paramlist_ = Global_paramlist,
val_paramlist_ = val_paramlist
){
# dev --
# tierData <- ls_tierData[[tierName]]
# Global_paramlist_ <- Global_paramlist
# val_paramlist_ <- val_paramlist
# dev --
assign_parmsList(Global_paramlist_, envir = environment()) # environment() returns the local environment of the function.
assign_parmsList(val_paramlist_, envir = environment())
salScale_tier <- tierData$df_salScale
df_n_actives_tier <- tierData$df_n_actives
# Step 0: Check compatibility
# max yos: the tier's salary scale must cover every yos the model can reach.
max_yos_model <- max_retAge - min_age
max_yos_tier <- max(salScale_tier$yos)
if(!max_yos_model <= max_yos_tier) stop("Incomplete yos range")
# ea range, N/A for MEPERS
# ea_range_tier <- range(salScale_tier$ea)
# ea_range_model <- range(range_ea)
# if(!ea_range_tier[1]<=ea_range_model[1] |
# !ea_range_tier[2]>=ea_range_model[2]) stop("Incomplete ea range")
## Step 1. Create complete salary scale
# This step generates a complete salary scale for all combos of starting year, entry ages and ages relevant to
# to model.
# - Salaries in year 1 are set to 1.
# - For future workers (entry year greater than 1) whose spans of career years do not include year 1,
# assumption about their starting salary levels is applied.
# salScale_tier
range_start_year <- (1 - (max_age - min_age)):nyear # smallest relevant start year is the entry year who is at the max_age in year 1
range_age_actives <- min_age:(max_retAge - 1)
salScale_full <-
expand_grid(start_year = range_start_year,
ea = range_ea,
age = range_age_actives) %>%
filter(age >= ea,
start_year + (max_retAge - 1 - ea) >= 1 # workers must stay in workforce at least up to year 1.
) %>%
mutate(yos = age - ea) %>%
#left_join(select(salScale_tier, yos, salScale)) %>% #, by = c("yos")) %>%
left_join(salScale_tier) %>%
group_by(start_year, ea) %>%
mutate(year = start_year + (age - ea),
growth_start = (1 + startingSalgrowth)^(start_year - 1), # assume starting salary grows at the assumed value for all entry ages for all years
scale_cum = cumprod(ifelse(age == ea, 1, lag(1 + salScale))),
scale_cum = ifelse(start_year <= 1, scale_cum/scale_cum[year == 1], # Salary levels before starting year are scaled based on salary in the initial year.
scale_cum * growth_start)) %>%
ungroup %>%
mutate(year = init_year + year - 1,
start_year = init_year + start_year - 1 # convert to valuation year
) %>%
select(start_year, ea, age, year, scale_cum) %>%
arrange(start_year, ea, age)
# salScale_full %>% filter(start_year ==2021, ea == 30)
## Step 2. Supplement the inital salary table with all starting salary
# This function generates a table of initial salary (year 1) which include all starting salary levels (age = ea)
# If the starting salary is missing from the actives data frame, spline function is used to interpolate and/or
# extraploate the missing values.
salary_tier <-
df_n_actives_tier %>%
mutate(age = ea + yos) %>%
select(ea, age, salary)
salary_start <-
salary_tier %>%
as.data.frame() %>% # splong does not work well with tibbles
splong("ea", range_ea, method = "natural") %>%
filter(age == ea) %>%
select(-age) %>%
splong("ea", range_ea) %>%
mutate(age = ea,
salary = ifelse(salary < 0, 0, salary)) # clamp: interpolation can overshoot below zero
# Combine observed and interpolated starting salaries; drop duplicated
# (age, ea) cells, keeping the observed value (it was bound first).
salary_tier <- rbind(salary_tier, salary_start)
salary_tier <- salary_tier[!duplicated(salary_tier[c("age","ea")]),]
# Step 3. Create complete salary history
# Anchor each cohort: existing cohorts at their init_year salary, future
# cohorts at their entry-age salary, then scale along the career path.
salary_full <-
salScale_full %>%
left_join(salary_tier, by = c("ea", "age")) %>%
group_by(start_year, ea) %>%
mutate(salary = na2zero(salary),
sx = ifelse(start_year <= init_year,
salary[year == init_year] * scale_cum,
salary[age == ea]* scale_cum)) %>%
select(start_year, ea, age, year, sx)
# salary_full %>% filter(start_year == 2015 )
tierData$salary_full <- salary_full
return(tierData)
}
#*******************************************************************************
# Infering ditribution of entrants from low yos actives #####
#*******************************************************************************
# Infer the entry-age distribution of new entrants from active members with
# low years of service (yos <= yos_max).
#
# tierData : tier data list; needs $df_n_actives with columns ea, yos,
#            nactives, and $tier_params$tier_name for the warning message.
# yos_max  : members with yos <= yos_max are treated as recent entrants.
# simple   : kept for interface compatibility; the alternative "simple
#            imputation rule" is currently disabled in this implementation.
# Returns tierData with $entrants_dist added: a numeric vector named by
# range_ea that sums to 1.
#
# NOTE(review): splong() is a project helper (spline interpolation over a
# grid); range_ea comes from Global_paramlist via assign_parmsList.
add_entrantsDist <- function(tierData,
                             yos_max = 3,
                             val_paramlist_ = val_paramlist,
                             Global_paramlist_ = Global_paramlist,
                             simple = FALSE){

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_,    envir = environment())

  nactives_tier <-
    tierData$df_n_actives %>%
    mutate(age = ea + yos) %>%
    select(age, ea, nactives)

  # For safety, interpolate potential missing (ea, age) cells.
  # (The original also computed `spread(age, nactives)` here and discarded
  # the result — dead code, removed.)
  nactives_tier <- splong(nactives_tier, "ea", range_ea) %>%
    splong("age", range_ea) %>%
    filter(age >= ea)

  # For each ea, average number of members with yos <= yos_max.
  entrants <-
    nactives_tier %>%
    filter(age - ea <= yos_max) %>%
    group_by(ea) %>%
    summarise(avg_ent = mean(nactives), .groups = "drop")

  # Interpolation can produce negative counts; warn, then coerce them to 0.
  neg_ea <- entrants$ea[which(entrants$avg_ent < 0)]
  if (length(neg_ea) > 0) {
    warning("\n", tierData$tier_params$tier_name, ":", "\n",
            "Negative inferred value(s) in the following entry age(s): ",
            paste(neg_ea, collapse = ", "), "\n",
            "Values will be coerced to 0.")
  }
  entrants %<>%
    mutate(avg_ent = ifelse(avg_ent < 0, 0, avg_ent))

  # Smooth the averaged counts and normalize to a probability distribution.
  dist <- lowess(entrants$avg_ent, f = 0.1)$y
  dist <- dist / sum(dist)
  names(dist) <- range_ea

  # Add back to tier data.
  tierData$entrants_dist <- dist
  return(tierData)
}
#*******************************************************************************
# Creating a generational decrement table for the model ####
#*******************************************************************************
# Expand the per-tier decrement table (dims: ea x age) into a generational
# table (dims: start_year x ea x age, with year = start_year + yos), filling
# combinations missing from the tier table with zeros.
# Returns tierData with $decrements_expanded added.
# NOTE(review): colwise()/na2zero() — colwise is plyr, the rest of the
# pipeline is dplyr; na2zero is a project helper. range_year_decrements is
# computed but never used (the expand_grid line using it is commented out).
expand_decrements <- function(tierData,
val_paramlist_ = val_paramlist,
Global_paramlist_ = Global_paramlist){
# # dev --
#
# tierData <- ls_tierData[[tierName]]
# val_paramlist_ <- val_paramlist
# Global_paramlist_ <- Global_paramlist
#
# # dev --
assign_parmsList(Global_paramlist_, envir = environment())
assign_parmsList(val_paramlist_, envir = environment())
decrements_tier <- tierData$decrements
#decrements_tier
# dims of decrement_ter: ea x age
# dims of expanded decrement table: year x ea x age
# range_start_year <- 1915:(init_year + nyear - 1)
# starting from 1915 is more than enough, just be safe
range_year_decrements <- 1915:(init_year + nyear + max_age)
range_start_year <- 1915:(init_year + nyear - 1)
decrements_tier_expanded <-
expand_grid(#year = range_year_decrements,
start_year = range_start_year,
age = range_age,
ea = range_ea) %>%
mutate(yos = age - ea,
year = start_year + yos) %>%
filter(age >= ea
# start_year + (max_retAge - 1 - ea) >= 1
) %>%
left_join(decrements_tier, by = c("ea", "age", "yos")) %>%
colwise(na2zero)(.) %>%
relocate(year, ea, age, yos)%>%
arrange(year, ea, age)
tierData$decrements_expanded <- decrements_tier_expanded
return(tierData)
}
#*******************************************************************************
# Apply improvement ####
#*******************************************************************************
# plan specific
# Apply mortality-improvement factors to the expanded decrement table, build
# unisex rates from male/female blends, and apply calibration factors.
#
# tierData : needs $decrements_expanded (from expand_decrements), a
#            $decrements_improvement table (year x age with impr_male /
#            impr_female columns), and $tier_params$share_male/share_female.
# Returns tierData with $decrements_expanded updated in place.
# NOTE(review): calib_qxm.post / calib_qxmd.post come from the paramlists via
# assign_parmsList; na2zero/colwise as elsewhere in this file.
apply_decImprovements <- function(tierData,
                                  val_paramlist_ = val_paramlist,
                                  Global_paramlist_ = Global_paramlist){

  # dev --
  # BUG FIX: these dev-time overrides were left *uncommented* in the original,
  # so the function ignored its `tierData` / paramlist arguments and always
  # operated on the global `ls_tierData[[tierName]]`. Commented out to match
  # the sibling functions in this file.
  # tierData <- ls_tierData[[tierName]]
  # val_paramlist_ <- val_paramlist
  # Global_paramlist_ <- Global_paramlist
  # dev --

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_,    envir = environment())

  decrements_expanded    <- tierData$decrements_expanded
  decrements_improvement <- tierData$decrements_improvement

  ## Range of years the original improvement table covers.
  range_year_imprTab <- range(decrements_improvement$year)

  ## Expand the improvement table to all (year, age) cells needed, holding the
  ## first/last tabulated year's values constant outside the table's range.
  decrements_improvement <-
    expand_grid(year = decrements_expanded$year %>% unique(),
                age  = range_age) %>%
    left_join(decrements_improvement, by = c("year", "age")) %>%
    group_by(age) %>%
    mutate(across(
      !c(year),  # should not include the grouping variable
      ~ifelse(year < range_year_imprTab[1], .x[year == range_year_imprTab[1]], .x)
    )) %>%
    mutate(across(
      !c(year),
      ~ifelse(year > range_year_imprTab[2], .x[year == range_year_imprTab[2]], .x)
    )) %>%
    ungroup

  ## Merge improvements into the decrement table and scale mortality rates.
  decrements_expanded %<>%
    left_join(decrements_improvement, by = c("year", "age")) %>%
    mutate(qxm.post_female    = qxm.post_female    * impr_female,
           qxm.post_male      = qxm.post_male      * impr_male,
           qxm.defrRet_female = qxm.defrRet_female * impr_female,
           qxm.defrRet_male   = qxm.defrRet_male   * impr_male,
           qxmd.post_female   = qxmd.post_female   * impr_female,
           qxmd.post_male     = qxmd.post_male     * impr_male
    ) %>%
    colwise(na2zero)(.) %>%
    ungroup

  ## Construct unisex mortality as a male/female blend using tier gender shares.
  share_male   <- tierData$tier_params$share_male
  share_female <- tierData$tier_params$share_female
  decrements_expanded %<>%
    mutate(
      qxm.pre     = share_male * qxm.pre_male     + share_female * qxm.pre_female,
      qxm.post    = share_male * qxm.post_male    + share_female * qxm.post_female,
      qxmd.post   = share_male * qxmd.post_male   + share_female * qxmd.post_female,
      qxm.defrRet = share_male * qxm.defrRet_male + share_female * qxm.defrRet_female
    )

  ## Calibration
  decrements_expanded %<>%
    mutate(qxm.post  = (calib_qxm.post)  * qxm.post,
           qxmd.post = (calib_qxmd.post) * qxmd.post)

  tierData$decrements_expanded <- decrements_expanded
  return(tierData)
}
#*******************************************************************************
# Modifying retirement rates for the purpose of modeling ####
#*******************************************************************************
# Adjustment to the decrement table:
# Why
# In Winklevoss, retirement is treated as an immediate event, that is, retirees would start receiving benefit payments
# the same year when they retire. This is different from how diability and death benefits are treated, for which beneficiaries
# start receiving benefits the next year disability/death occurs, and can cause difficulty in modeling retirement with multiple
# possible retirement ages (need to check).
#
# To facilitate modeling and maintain consistent treatment aross all types of benefits,
# we assume that retirement occurs at the end of year t-1 for those who start receiving benefits at the begining of year t. Since
# theoretically year end of t-1 and year begining of t is the same point of time, we maintained the same theoretical treatment of
# retirement in Winklevoss (retirement is a immediate event); while assigning the retirement event and benefit payment event into two model
# periods allow retirement to be modeled the same manner as other benefit types.
# Note that since retirement is assumed to occur at the year end of the previous year, the retirement rate of year t is applied to
# those who survived all other types of separation events (death, termination, disability).
# How
# Move qxr backward by 1 period.(qxr at t is now assigned to t - 1), the probability of retirement at t - 1 is qxr(t)*(1 - qxt(t-1) - qxm(t-1) - qxd(t-1))
# For the age right before the max retirement age (r.max - 1), probability of retirement is 1 - qxm.a - qxd.a - qxt.a,
# which means all active members who survive all other risks at (r.max - 1) will enter the status "retired" for sure at age r.max (and collect the benefit regardless
# whether they will die at r.max)
# share of contigent annuity and life annuity.
# TEMP: For now, assume all members opt for life annuity
# Adjust retirement rates for modeling (see the block comment above this
# function): retirement at age a is re-assigned to the end of year a-1, so the
# shifted rate qxr(t-1) = qxr(t) * (1 - qxt - qxm.pre - qxd), and everyone
# surviving all other decrements at max_retAge - 1 retires with certainty.
# Also splits qxr into life-annuity (qxr.la) and contingent-annuity (qxr.ca)
# shares; currently all retirees are assumed to take a life annuity.
# Returns tierData with $decrements_expanded updated.
adj_retRates <- function(tierData,
val_paramlist_ = val_paramlist,
Global_paramlist_ = Global_paramlist){
# dev --
# tierData <- ls_tierData[[tierName]]
# val_paramlist_ <- val_paramlist
# Global_paramlist_ <- Global_paramlist
# dev --
assign_parmsList(Global_paramlist_, envir = environment())
assign_parmsList(val_paramlist_, envir = environment())
# decrements_tier <- tierData$decrements
decrements_expanded <- tierData$decrements_expanded
#decrements_tier %>% head
## For now, assume all retirees choose life annuity
# - la: life annuity
# - ca: Contingent annuity
pct_ca <- 0 # percentage choosing contingent annuity
pct_la <- 1 - pct_ca # percentage choosing life annuity
# Shift retirement rates back one period within each (start_year, ea) cohort.
decrements_expanded %<>%
mutate(start_year = year - yos) %>%
group_by(start_year, ea) %>%
mutate(qxr = ifelse(age == max_retAge - 1,
1 - qxt - qxm.pre - qxd,
lead(qxr) * (1 - qxt - qxm.pre - qxd)), # Total probability of retirement
qxr.la = ifelse(age == max_retAge, 0 , qxr * pct_la), # Prob of opting for life annuity
qxr.ca = ifelse(age == max_retAge, 0 , qxr * pct_ca), # Prob of opting for contingent annuity
) %>%
ungroup() %>%
select(-start_year, -yos)
tierData$decrements_expanded <- decrements_expanded
return(tierData)
######!!!! need to construct retirement age dependent mortality for life annuitants.
# For retired(".r"), the only target status is "dead". Note that in practice retirement mortality may differ from the regular mortality.
#mutate(qxm.la.r = qxm.r)
}
#*******************************************************************************
# Adjustments to initial members ####
#*******************************************************************************
# Adjust the initial (valuation-date) member data for modeling: stamp service
# retirees with a modeled entry age (min_age), a retirement age equal to
# their current age, the implied start_year, and apply the benefit adjustment
# factor B_adjust to their current benefit.
# Returns tierData with $df_n_servRet updated.
adj_initMembers <- function(tierData,
val_paramlist_ = val_paramlist,
Global_paramlist_ = Global_paramlist){
# dev --
# tierData <- ls_tierData[[tierName]]
# val_paramlist_ <- val_paramlist
# Global_paramlist_ <- Global_paramlist
# dev --
assign_parmsList(Global_paramlist_, envir = environment())
assign_parmsList(val_paramlist_, envir = environment())
# For modeling purposes, age of entry and age of retirement must be added to
# data for all types of retirees. The following assumptions are made:
# - Entry age: The minimum age allowed in the model.
# TODO / WARNING: may cause issue if the data has non-zero members at the min age.
# - age of retirement: the current age, which implies that the retirement year
# is the first simulation year
#
tierData$df_n_servRet %<>%
mutate(year = init_year,
ea = min_age,
age_servRet= age,
start_year = year - (age - ea),
benefit_servRet = (1 + B_adjust) * benefit_servRet
) %>%
relocate(grp, start_year, year, ea, age, age_servRet)
# N/A for MEPERS
# tierData$df_n_disbRet %<>%
# mutate(year = init_year,
# ea = min_age,
# age_disbRet= age,
# start_year = year - (age - ea),
# benefit_disbRet = (1 + B_adjust) * benefit_disbRet) %>%
# relocate(grp, start_year, year, ea, age, age_disbRet)
return(tierData)
}
|
/model/valuation/model_val_prepDataFuns.R
|
no_license
|
yimengyin16/model_SJ
|
R
| false
| false
| 20,442
|
r
|
# This script prepares data for valuation
# 1. function for salary scale
# - add_salary_full
# 2. function for distribution of new entrants
# add_entrantsDist
# 3. function adjusting retirement rates for modeling
#*******************************************************************************
# Adjustments to tier parameters ####
#*******************************************************************************
adj_tierParams <- function(tierData,
val_paramlist_ = val_paramlist,
Global_paramlist_ = Global_paramlist){
#
# tierData <- ls_tierData[[1]]
# val_paramlist_ <- val_paramlist
# Global_paramlist_ <- Global_paramlist
# Override assumed cola:
assign_parmsList(val_paramlist_, envir = environment())
if(str_detect(tierData$tier_name, "t1")){
if(!is.na(val_paramlist_$cola_assumed_override_t1)){
tierData$tier_params$cola_assumed <- val_paramlist_$cola_assumed_override_t1
}
}
if(str_detect(tierData$tier_name, "t1")){
if(!is.na(val_paramlist_$fasyears_override_t1)){
tierData$tier_params$fasyears <- val_paramlist_$fasyears_override_t1
}
}
if(str_detect(tierData$tier_name, "t2")){
if(!is.na(val_paramlist_$cola_assumed_override_t2)){
tierData$tier_params$cola_assumed <- val_paramlist_$cola_assumed_override_t2
}
}
return(tierData)
}
#*******************************************************************************
# Constructing full salary schedule #####
#*******************************************************************************
add_salary_full <- function(tierData,
Global_paramlist_ = Global_paramlist,
val_paramlist_ = val_paramlist
){
# dev --
# tierData <- ls_tierData[[tierName]]
# Global_paramlist_ <- Global_paramlist
# val_paramlist_ <- val_paramlist
# dev --
assign_parmsList(Global_paramlist_, envir = environment()) # environment() returns the local environment of the function.
assign_parmsList(val_paramlist_, envir = environment())
salScale_tier <- tierData$df_salScale
df_n_actives_tier <- tierData$df_n_actives
# Step 0: Check compatibility
# max yos
max_yos_model <- max_retAge - min_age
max_yos_tier <- max(salScale_tier$yos)
if(!max_yos_model <= max_yos_tier) stop("Incomplete yos range")
# ea range, N/A for MEPERS
# ea_range_tier <- range(salScale_tier$ea)
# ea_range_model <- range(range_ea)
# if(!ea_range_tier[1]<=ea_range_model[1] |
# !ea_range_tier[2]>=ea_range_model[2]) stop("Incomplete ea range")
## Step 1. Create complete salary scale
# This step generates a complete salary scale for all combos of starting year, entry ages and ages relevant to
# to model.
# - Salaries in year 1 are set to 1.
# - For future workers (entry year greater than 1) whose spans of career years do not include year 1,
# assumption about their starting salary levels is applied.
# salScale_tier
range_start_year <- (1 - (max_age - min_age)):nyear # smallest relevant start year is the entry year who is at the max_age in year 1
range_age_actives <- min_age:(max_retAge - 1)
salScale_full <-
expand_grid(start_year = range_start_year,
ea = range_ea,
age = range_age_actives) %>%
filter(age >= ea,
start_year + (max_retAge - 1 - ea) >= 1 # workers must stay in workforce at least up to year 1.
) %>%
mutate(yos = age - ea) %>%
#left_join(select(salScale_tier, yos, salScale)) %>% #, by = c("yos")) %>%
left_join(salScale_tier) %>%
group_by(start_year, ea) %>%
mutate(year = start_year + (age - ea),
growth_start = (1 + startingSalgrowth)^(start_year - 1), # assume starting salary grows at the assumed value for all entry ages for all years
scale_cum = cumprod(ifelse(age == ea, 1, lag(1 + salScale))),
scale_cum = ifelse(start_year <= 1, scale_cum/scale_cum[year == 1], # Salary levels before starting year are scaled based on salary in the initial year.
scale_cum * growth_start)) %>%
ungroup %>%
mutate(year = init_year + year - 1,
start_year = init_year + start_year - 1 # convert to valuation year
) %>%
select(start_year, ea, age, year, scale_cum) %>%
arrange(start_year, ea, age)
# salScale_full %>% filter(start_year ==2021, ea == 30)
## Step 2. Supplement the inital salary table with all starting salary
# This function generates a table of initial salary (year 1) which include all starting salary levels (age = ea)
# If the starting salary is missing from the actives data frame, spline function is used to interpolate and/or
# extraploate the missing values.
salary_tier <-
df_n_actives_tier %>%
mutate(age = ea + yos) %>%
select(ea, age, salary)
salary_start <-
salary_tier %>%
as.data.frame() %>% # splong does not work well with tibbles
splong("ea", range_ea, method = "natural") %>%
filter(age == ea) %>%
select(-age) %>%
splong("ea", range_ea) %>%
mutate(age = ea,
salary = ifelse(salary < 0, 0, salary))
salary_tier <- rbind(salary_tier, salary_start)
salary_tier <- salary_tier[!duplicated(salary_tier[c("age","ea")]),]
# Step 3. Create complete salary history
salary_full <-
salScale_full %>%
left_join(salary_tier, by = c("ea", "age")) %>%
group_by(start_year, ea) %>%
mutate(salary = na2zero(salary),
sx = ifelse(start_year <= init_year,
salary[year == init_year] * scale_cum,
salary[age == ea]* scale_cum)) %>%
select(start_year, ea, age, year, sx)
# salary_full %>% filter(start_year == 2015 )
tierData$salary_full <- salary_full
return(tierData)
}
#*******************************************************************************
# Infering ditribution of entrants from low yos actives #####
#*******************************************************************************
add_entrantsDist <- function(tierData,
                             yos_max = 3,
                             val_paramlist_    = val_paramlist,
                             Global_paramlist_ = Global_paramlist,
                             simple = FALSE){
  # Infer the entry-age distribution of new entrants from current actives with
  # low years of service (yos <= yos_max).
  #
  # Args:
  #   tierData : list of tier data; must contain df_n_actives with columns
  #              ea, yos, nactives, and tier_params$tier_name.
  #   yos_max  : max years of service defining "recent entrants".
  #   val_paramlist_, Global_paramlist_ : parameter lists whose elements are
  #              assigned into this function's environment (range_ea is used).
  #   simple   : currently a NO-OP -- the "simple imputation rule" it was meant
  #              to trigger is disabled; the argument is kept for backward
  #              compatibility with existing callers.
  #
  # Returns: tierData with a new element `entrants_dist`, a numeric vector
  #          named by range_ea that sums to 1.

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_,    envir = environment())

  nactives_tier <-
    tierData$df_n_actives %>%
    mutate(age = ea + yos) %>%
    select(age, ea, nactives)

  # For safety, interpolate potential missing cells over the full ea x age grid.
  nactives_tier <- splong(nactives_tier, "ea", range_ea) %>%
    splong("age", range_ea) %>%
    filter(age >= ea)

  # For each ea, average number of members with yos <= yos_max.
  entrants <-
    nactives_tier %>%
    filter(age - ea <= yos_max) %>%
    group_by(ea) %>%
    summarise(avg_ent = mean(nactives), .groups = "drop")

  # Interpolation can produce negative averages; warn and coerce them to 0.
  # FIX: extract `ea` as a plain vector (the old tibble subset printed a
  # malformed age list in the warning message).
  neg_ea <- entrants$ea[entrants$avg_ent < 0]
  if (length(neg_ea) > 0) {
    warning("\n", tierData$tier_params$tier_name, ":", "\n",
            "Negative inferred value(s) in the following entry age(s): ",
            paste(neg_ea, collapse = ", "), "\n",
            "Values will be coerced to 0.")
  }
  entrants <- entrants %>%
    mutate(avg_ent = ifelse(avg_ent < 0, 0, avg_ent))

  # Smooth the age profile and normalize to a probability distribution.
  # NOTE: naming by range_ea relies on `entrants` having exactly one row per
  # entry age in range_ea, which the splong() interpolation above guarantees.
  dist <- lowess(entrants$avg_ent, f = 0.1)$y
  dist <- dist / sum(dist)
  names(dist) <- range_ea

  # add back to tier data
  tierData$entrants_dist <- dist
  return(tierData)
}
#*******************************************************************************
# Creating a generational decrement table for the model ####
#*******************************************************************************
expand_decrements <- function(tierData,
                              val_paramlist_    = val_paramlist,
                              Global_paramlist_ = Global_paramlist){
  # Expand the tier decrement table from (ea x age) to a generational table
  # indexed by (year x ea x age): one row per cohort (start_year, ea) and age.
  #
  # Args:
  #   tierData : list of tier data; must contain `decrements` keyed by
  #              ea, age, yos.
  #   val_paramlist_, Global_paramlist_ : parameter lists assigned into this
  #              environment (init_year, nyear, range_age, range_ea are used).
  #
  # Returns: tierData with a new element `decrements_expanded`.

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_,    envir = environment())

  decrements_tier <- tierData$decrements

  # Starting cohorts from 1915 is more than enough to cover every member
  # alive during the simulation; just being safe.
  range_start_year <- 1915:(init_year + nyear - 1)

  decrements_tier_expanded <-
    expand_grid(start_year = range_start_year,
                age = range_age,
                ea  = range_ea) %>%
    mutate(yos  = age - ea,
           year = start_year + yos) %>%
    filter(age >= ea) %>%
    left_join(decrements_tier, by = c("ea", "age", "yos")) %>%
    # FIX: use dplyr::across instead of plyr::colwise -- mixing plyr verbs
    # into a dplyr pipeline is error-prone and inconsistent with the rest of
    # the file. Behavior is the same: na2zero applied to every column.
    mutate(across(everything(), na2zero)) %>%
    relocate(year, ea, age, yos) %>%
    arrange(year, ea, age)

  tierData$decrements_expanded <- decrements_tier_expanded
  return(tierData)
}
#*******************************************************************************
# Apply improvement ####
#*******************************************************************************
# plan specific
apply_decImprovements <- function(tierData,
                                  val_paramlist_    = val_paramlist,
                                  Global_paramlist_ = Global_paramlist){
  # Apply mortality-improvement factors to the expanded decrement table and
  # rebuild unisex mortality rates. Plan specific.
  #
  # BUG FIX: the "dev" lines below were previously NOT commented out (unlike
  # every sibling function), so the function silently overwrote its `tierData`
  # argument with the global ls_tierData[[tierName]]. They are now comments.
  ## dev --
  # tierData          <- ls_tierData[[tierName]]
  # val_paramlist_    <- val_paramlist
  # Global_paramlist_ <- Global_paramlist
  ## dev --

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_,    envir = environment())

  decrements_expanded    <- tierData$decrements_expanded
  decrements_improvement <- tierData$decrements_improvement

  ## Range of years the original improvement table covers.
  range_year_imprTab <- range(decrements_improvement$year)

  ## Expand the improvement table to cover all ages and years:
  ## for each age, fill years outside the table with the endpoint values.
  decrements_improvement <-
    expand_grid(year = decrements_expanded$year %>% unique(),
                age  = range_age) %>%
    left_join(decrements_improvement, by = c("year", "age")) %>%
    group_by(age) %>%
    mutate(across(
      !c(year),  # must not include the grouping variable
      ~ifelse(year < range_year_imprTab[1], .x[year == range_year_imprTab[1]], .x)
    )) %>%
    mutate(across(
      !c(year),
      ~ifelse(year > range_year_imprTab[2], .x[year == range_year_imprTab[2]], .x)
    )) %>%
    ungroup()

  ## Merge improvement factors into the expanded decrement table and apply
  ## them to the gender-specific mortality rates.
  decrements_expanded <-
    decrements_expanded %>%
    left_join(decrements_improvement, by = c("year", "age")) %>%
    mutate(qxm.post_female    = qxm.post_female    * impr_female,
           qxm.post_male      = qxm.post_male      * impr_male,
           qxm.defrRet_female = qxm.defrRet_female * impr_female,
           qxm.defrRet_male   = qxm.defrRet_male   * impr_male,
           qxmd.post_female   = qxmd.post_female   * impr_female,
           qxmd.post_male     = qxmd.post_male     * impr_male) %>%
    # FIX: dplyr::across replaces plyr::colwise (same effect, consistent style).
    mutate(across(everything(), na2zero)) %>%
    ungroup()

  ## Construct unisex mortality from the adjusted rates, weighted by the
  ## tier's gender shares.
  share_male   <- tierData$tier_params$share_male
  share_female <- tierData$tier_params$share_female

  decrements_expanded <-
    decrements_expanded %>%
    mutate(
      qxm.pre     = share_male * qxm.pre_male     + share_female * qxm.pre_female,
      qxm.post    = share_male * qxm.post_male    + share_female * qxm.post_female,
      qxmd.post   = share_male * qxmd.post_male   + share_female * qxmd.post_female,
      qxm.defrRet = share_male * qxm.defrRet_male + share_female * qxm.defrRet_female
    )

  ## Calibration.
  # NOTE(review): calib_qxm.post / calib_qxmd.post are used as DIRECT
  # multipliers here, but the commented example values elsewhere (-0.2) would
  # produce negative mortality rates -- presumably (1 + calib) was intended.
  # Confirm the values supplied in val_paramlist before changing this.
  decrements_expanded <-
    decrements_expanded %>%
    mutate(qxm.post  = (calib_qxm.post)  * qxm.post,
           qxmd.post = (calib_qxmd.post) * qxmd.post)

  tierData$decrements_expanded <- decrements_expanded
  return(tierData)
}
#*******************************************************************************
# Modifying retirement rates for the purpose of modeling ####
#*******************************************************************************
# Adjustment to the decrement table:
# Why
# In Winklevoss, retirement is treated as an immediate event, that is, retirees would start receiving benefit payments
# the same year when they retire. This is different from how diability and death benefits are treated, for which beneficiaries
# start receiving benefits the next year disability/death occurs, and can cause difficulty in modeling retirement with multiple
# possible retirement ages (need to check).
#
# To facilitate modeling and maintain consistent treatment aross all types of benefits,
# we assume that retirement occurs at the end of year t-1 for those who start receiving benefits at the begining of year t. Since
# theoretically year end of t-1 and year begining of t is the same point of time, we maintained the same theoretical treatment of
# retirement in Winklevoss (retirement is a immediate event); while assigning the retirement event and benefit payment event into two model
# periods allow retirement to be modeled the same manner as other benefit types.
# Note that since retirement is assumed to occur at the year end of the previous year, the retirement rate of year t is applied to
# those who survived all other types of separation events (death, termination, disability).
# How
# Move qxr backward by 1 period.(qxr at t is now assigned to t - 1), the probability of retirement at t - 1 is qxr(t)*(1 - qxt(t-1) - qxm(t-1) - qxd(t-1))
# For the age right before the max retirement age (r.max - 1), probability of retirement is 1 - qxm.a - qxd.a - qxt.a,
# which means all active members who survive all other risks at (r.max - 1) will enter the status "retired" for sure at age r.max (and collect the benefit regardless
# whether they will die at r.max)
# share of contigent annuity and life annuity.
# TEMP: For now, assume all members opt for life annuity
adj_retRates <- function(tierData,
                         val_paramlist_ = val_paramlist,
                         Global_paramlist_ = Global_paramlist){
  # Shift retirement rates back one period so that retirement at the end of
  # year t-1 produces benefit payments at the start of year t (see the long
  # rationale comment above this function). Splits total retirement into
  # life-annuity (qxr.la) and contingent-annuity (qxr.ca) components.
  # Returns tierData with `decrements_expanded` updated in place.
  # dev --
  # tierData <- ls_tierData[[tierName]]
  # val_paramlist_ <- val_paramlist
  # Global_paramlist_ <- Global_paramlist
  # dev --
  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_, envir = environment())
  # decrements_tier <- tierData$decrements
  decrements_expanded <- tierData$decrements_expanded
  #decrements_tier %>% head
  ## For now, assume all retirees choose life annuity
  # - la: life annuity
  # - ca: Contingent annuity
  pct_ca <- 0        # percentage choosing contingent annuity
  pct_la <- 1 - pct_ca # percentage choosing life annuity
  # Within each cohort (start_year, ea), pull next year's qxr back one period
  # and scale by the probability of surviving all other decrements this year.
  # lead() makes this order-sensitive: rows must stay sorted by age within
  # each group (guaranteed by expand_decrements' arrange()).
  decrements_expanded %<>%
    mutate(start_year = year - yos) %>%
    group_by(start_year, ea) %>%
    mutate(qxr = ifelse(age == max_retAge - 1,
                        1 - qxt - qxm.pre - qxd,
                        lead(qxr) * (1 - qxt - qxm.pre - qxd)), # Total probability of retirement
           qxr.la = ifelse(age == max_retAge, 0 , qxr * pct_la), # Prob of opting for life annuity
           qxr.ca = ifelse(age == max_retAge, 0 , qxr * pct_ca), # Prob of opting for contingent annuity
    ) %>%
    ungroup() %>%
    select(-start_year, -yos)
  tierData$decrements_expanded <- decrements_expanded
  return(tierData)
  ######!!!! need to construct retirement age dependent mortality for life annuitants.
  # For retired(".r"), the only target status is "dead". Note that in practice retirement mortality may differ from the regular mortality.
  #mutate(qxm.la.r = qxm.r)
}
#*******************************************************************************
# Adjustments to initial members ####
#*******************************************************************************
adj_initMembers <- function(tierData,
                            val_paramlist_ = val_paramlist,
                            Global_paramlist_ = Global_paramlist){
  # Prepare the initial service-retiree table for the model.
  #
  # Entry age and retirement age must be present for every retiree record;
  # the following assumptions are made:
  #   - Entry age: the minimum age allowed in the model.
  #     WARNING: may cause issues if the data has non-zero members at min age.
  #   - Retirement age: the current age, which implies the retirement year is
  #     the first simulation year.
  # Benefits are scaled by (1 + B_adjust).
  #
  # Returns: tierData with df_n_servRet updated in place.

  assign_parmsList(Global_paramlist_, envir = environment())
  assign_parmsList(val_paramlist_, envir = environment())

  df_servRet <- tierData$df_n_servRet
  df_servRet <- df_servRet %>%
    mutate(year            = init_year,
           ea              = min_age,
           age_servRet     = age,
           start_year      = year - (age - ea),
           benefit_servRet = (1 + B_adjust) * benefit_servRet) %>%
    relocate(grp, start_year, year, ea, age, age_servRet)
  tierData$df_n_servRet <- df_servRet

  # Disability retirees (df_n_disbRet): not applicable for MEPERS.

  return(tierData)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expressionBased.R
\name{getmiRNACount}
\alias{getmiRNACount}
\title{Get TCGA miRNAseq expression of miRNA genes for the given cancer}
\usage{
getmiRNACount(mirnagene, cancer, databaseFile)
}
\arguments{
\item{mirnagene}{Data frame of miRNA gene names given in the mature miRNA format}
\item{cancer}{Name of the TCGA project code such as 'BRCA'}
\item{databaseFile}{Path of miRcancer.db file}
}
\value{
Data frame of the raw read count of the given miRNA genes
for different patients
}
\description{
Get TCGA miRNAseq expression of miRNA genes for the given cancer
}
|
/man/getmiRNACount.Rd
|
no_license
|
guldenolgun/NoRCE
|
R
| false
| true
| 620
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expressionBased.R
\name{getmiRNACount}
\alias{getmiRNACount}
\title{Get TCGA miRNAseq expression of miRNA genes for the given cancer}
\usage{
getmiRNACount(mirnagene, cancer, databaseFile)
}
\arguments{
\item{mirnagene}{Data frame of miRNA gene names given in the mature miRNA format}
\item{cancer}{Name of the TCGA project code such as 'BRCA'}
\item{databaseFile}{Path of miRcancer.db file}
}
\value{
Data frame of the raw read count of the given miRNA genes
for different patients
}
\description{
Get TCGA miRNAseq expression of miRNA genes for the given cancer
}
|
## Test posterior sampling functions
## load packages
library("testthat")
library("mgcv")
library("gratia")
test_that("smooth_samples works for a continuous by GAM", {
    expect_silent(sm <- smooth_samples(su_m_cont_by, n = 5, n_vals = 100,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 500 == 1 smooth * 5 * 100
    expect_identical(NROW(sm), 500L)
    expect_identical(NCOL(sm), 8L) # 8 cols, univariate smooths
})
test_that("smooth_samples works for a simple GAM", {
    expect_silent(sm <- smooth_samples(m_1_smooth, n = 5, n_vals = 100,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 500 == 1 smooth * 5 * 100
    expect_identical(NROW(sm), 500L)
    expect_identical(NCOL(sm), 8L) # 8 cols, univariate smooths
})
test_that("smooth_samples works for a multi-smooth GAM", {
    expect_silent(sm <- smooth_samples(m_gam, n = 5, n_vals = 100, seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 4 smooths * 5 * 100
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 11L) # 11 cols, 4 univariate smooths
})
test_that("smooth_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- smooth_samples(su_m_factor_by, n = 5, n_vals = 50,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == (1 + (1 * 3)) smooths * 5 * 50
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 10L) # 10 cols, univariate smooths with factor
})
test_that("smooth_samples() fails if not suitable method available", {
    expect_error(smooth_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("smooth_samples sets seed when seed not provided", {
    expect_silent(smooth_samples(m_gam, seed = NULL))
})
test_that("smooth_samples works with term provided", {
    expect_silent(sm <- smooth_samples(m_gam, term = "s(x2)", seed = 42))
})
test_that("smooth_samples errors with invalid term provided", {
    expect_error(sm <- smooth_samples(m_gam, term = "s(x10)", seed = 42),
                 "None of the terms matched a smooth.", fixed = TRUE)
})
# from #121
test_that("smooth_samples gets the right factor by smooth: #121", {
    expect_silent(sm <- smooth_samples(su_m_factor_by, n = 5, n_vals = 100,
                                       term = "s(x2):fac2", seed = 42))
    # factor level of `fac` column should be 2
    expect_identical(all(sm["fac"] == 2), TRUE)
})
# from #121 - problems when model contains ranef smooths
test_that("smooth_samples ignores ranef smooths: #121", {
    expect_message(sm <- smooth_samples(rm1, n = 5, n_vals = 100, seed = 42),
                   "Random effect smooths not currently supported.")
    # given n and n_vals and 4 smooths, nrow == 2000L
    expect_identical(nrow(sm), 2000L)
    # shouldn't have "s(fac)" in sm
    expect_identical(any(sm$smooth == "s(fac)"), FALSE)
})
test_that("smooth_samples fails if no smooths left to sample from", {
    expect_error(sm <- smooth_samples(rm1, term = "s(fac)",
                                      n = 5, n_vals = 100, seed = 42),
                 "No smooths left that can be sampled from.")
})
test_that("fitted_samples works for a simple GAM", {
    expect_silent(sm <- fitted_samples(m_1_smooth, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == 5 * 200 (nrow(dat))
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples works for a multi-smooth GAM", {
    expect_silent(sm <- fitted_samples(m_gam, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 5000 == 5 draws * 1000 observations in data
    expect_identical(NROW(sm), 5000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- fitted_samples(su_m_factor_by, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 5 draws * 400 observations in data
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples sets seed when seed not provided", {
    expect_silent(fitted_samples(m_gam, seed = NULL))
})
test_that("fitted_samples() fails if not suitable method available", {
    expect_error(fitted_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("predicted_samples works for a simple GAM", {
    expect_silent(sm <- predicted_samples(m_1_smooth, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == 5 * 200 (nrow(dat))
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples works for a multi-smooth GAM", {
    expect_silent(sm <- predicted_samples(m_gam, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 5000 == 5 draws * 1000 observations in data
    expect_identical(NROW(sm), 5000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- predicted_samples(su_m_factor_by, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 5 draws * 400 observations in data
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples sets seed when seed not provided", {
    expect_silent(predicted_samples(m_gam, seed = NULL))
})
test_that("predicted_samples() fails if not suitable method available", {
    expect_error(predicted_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("posterior_samples() fails if no suitable method available", {
    expect_error(posterior_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("fitted_samples example output doesn't change", {
    skip_on_cran()
    skip_on_os("mac")
    fs <- fitted_samples(m_gam, n = 5, seed = 42)
    expect_snapshot(fs)
})
test_that("smooth_samples example output doesn't change", {
    skip_on_cran()
    skip_on_os("mac")
    samples <- smooth_samples(m_gam, term = "s(x0)", n = 5, seed = 42)
    expect_snapshot(samples)
})
|
/tests/testthat/test-posterior-samples.R
|
no_license
|
cran/gratia
|
R
| false
| false
| 7,622
|
r
|
## Test posterior sampling functions
## load packages
library("testthat")
library("mgcv")
library("gratia")
test_that("smooth_samples works for a continuous by GAM", {
    expect_silent(sm <- smooth_samples(su_m_cont_by, n = 5, n_vals = 100,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 500 == 1 smooth * 5 * 100
    expect_identical(NROW(sm), 500L)
    expect_identical(NCOL(sm), 8L) # 8 cols, univariate smooths
})
test_that("smooth_samples works for a simple GAM", {
    expect_silent(sm <- smooth_samples(m_1_smooth, n = 5, n_vals = 100,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 500 == 1 smooth * 5 * 100
    expect_identical(NROW(sm), 500L)
    expect_identical(NCOL(sm), 8L) # 8 cols, univariate smooths
})
test_that("smooth_samples works for a multi-smooth GAM", {
    expect_silent(sm <- smooth_samples(m_gam, n = 5, n_vals = 100, seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 4 smooths * 5 * 100
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 11L) # 11 cols, 4 univariate smooths
})
test_that("smooth_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- smooth_samples(su_m_factor_by, n = 5, n_vals = 50,
                                       seed = 42))
    expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == (1 + (1 * 3)) smooths * 5 * 50
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 10L) # 10 cols, univariate smooths with factor
})
test_that("smooth_samples() fails if not suitable method available", {
    expect_error(smooth_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("smooth_samples sets seed when seed not provided", {
    expect_silent(smooth_samples(m_gam, seed = NULL))
})
test_that("smooth_samples works with term provided", {
    expect_silent(sm <- smooth_samples(m_gam, term = "s(x2)", seed = 42))
})
test_that("smooth_samples errors with invalid term provided", {
    expect_error(sm <- smooth_samples(m_gam, term = "s(x10)", seed = 42),
                 "None of the terms matched a smooth.", fixed = TRUE)
})
# from #121
test_that("smooth_samples gets the right factor by smooth: #121", {
    expect_silent(sm <- smooth_samples(su_m_factor_by, n = 5, n_vals = 100,
                                       term = "s(x2):fac2", seed = 42))
    # factor level of `fac` column should be 2
    expect_identical(all(sm["fac"] == 2), TRUE)
})
# from #121 - problems when model contains ranef smooths
test_that("smooth_samples ignores ranef smooths: #121", {
    expect_message(sm <- smooth_samples(rm1, n = 5, n_vals = 100, seed = 42),
                   "Random effect smooths not currently supported.")
    # given n and n_vals and 4 smooths, nrow == 2000L
    expect_identical(nrow(sm), 2000L)
    # shouldn't have "s(fac)" in sm
    expect_identical(any(sm$smooth == "s(fac)"), FALSE)
})
test_that("smooth_samples fails if no smooths left to sample from", {
    expect_error(sm <- smooth_samples(rm1, term = "s(fac)",
                                      n = 5, n_vals = 100, seed = 42),
                 "No smooths left that can be sampled from.")
})
test_that("fitted_samples works for a simple GAM", {
    expect_silent(sm <- fitted_samples(m_1_smooth, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == 5 * 200 (nrow(dat))
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples works for a multi-smooth GAM", {
    expect_silent(sm <- fitted_samples(m_gam, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 5000 == 5 draws * 1000 observations in data
    expect_identical(NROW(sm), 5000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- fitted_samples(su_m_factor_by, n = 5, seed = 42))
    expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 5 draws * 400 observations in data
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "fitted"))
})
test_that("fitted_samples sets seed when seed not provided", {
    expect_silent(fitted_samples(m_gam, seed = NULL))
})
test_that("fitted_samples() fails if not suitable method available", {
    expect_error(fitted_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("predicted_samples works for a simple GAM", {
    expect_silent(sm <- predicted_samples(m_1_smooth, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 1000 == 5 * 200 (nrow(dat))
    expect_identical(NROW(sm), 1000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples works for a multi-smooth GAM", {
    expect_silent(sm <- predicted_samples(m_gam, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 5000 == 5 draws * 1000 observations in data
    expect_identical(NROW(sm), 5000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples works for a multi-smooth factor by GAM", {
    expect_silent(sm <- predicted_samples(su_m_factor_by, n = 5, seed = 42))
    expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df",
                          "tbl", "data.frame"))
    ## 2000 == 5 draws * 400 observations in data
    expect_identical(NROW(sm), 2000L)
    expect_identical(NCOL(sm), 3L) # 3 cols
    expect_named(sm, expected = c("row", "draw", "response"))
})
test_that("predicted_samples sets seed when seed not provided", {
    expect_silent(predicted_samples(m_gam, seed = NULL))
})
test_that("predicted_samples() fails if not suitable method available", {
    expect_error(predicted_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("posterior_samples() fails if no suitable method available", {
    expect_error(posterior_samples(1:10),
                 "Don't know how to sample from the posterior of <integer>",
                 fixed = TRUE)
})
test_that("fitted_samples example output doesn't change", {
    skip_on_cran()
    skip_on_os("mac")
    fs <- fitted_samples(m_gam, n = 5, seed = 42)
    expect_snapshot(fs)
})
test_that("smooth_samples example output doesn't change", {
    skip_on_cran()
    skip_on_os("mac")
    samples <- smooth_samples(m_gam, term = "s(x0)", n = 5, seed = 42)
    expect_snapshot(samples)
})
|
### Prokaryotes pipeline phyloseq ###
### Author: Bianca Trevizan Segovia ###
### Date created: November 18, 2019 ###
### Date modified: July 07, 2020 ###
### Date last modified: September 02, 2020 ###
### This code is now updated to remove the contaminants found in the 2016 dataset ###
### Data from 2015, 2017 and 2018 is rarefied to 3,000 reads/sample, and 2016 is not rarefied due to contamination
### Changed the pipeline in taxa filtering steps to avoid removal of other taxa in that rank (i.e. | is.na(Rank5)) and change in the ordering of filtering
### Added coverage-based rarefaction and saved tables to be used in all analyses
library(phyloseq)
library(tidyverse)
library(reshape2)
library(stringr)
library(ape)
library(dplyr)
library(data.table)
#### Importing files ####
all_years_16S_unfiltered <- readRDS("Data/prokaryotes/seagrass_16s.full_dataset.unfiltered.phyloseq_format.RDS")
#### QUALITY FILTERING TAXA DATA ####
# 1. Remove mitochondrial and chloroplast ASVs
#    (the `| is.na(RankN)` keeps taxa with missing rank labels, so only
#    positively identified organelle sequences are dropped)
all_years_16S_filtered <- all_years_16S_unfiltered %>%
  subset_taxa(Rank5 != "Mitochondria" | is.na(Rank5)) %>%
  subset_taxa(Rank3 != "Chloroplastida" | is.na(Rank3)) %>%
  subset_taxa(Rank4 != "Chloroplast" | is.na(Rank4)) %>%
  subset_taxa(Rank5 != "Chloroplast"| is.na(Rank5)) %>%
  subset_taxa(Rank1 != "Unassigned"| is.na(Rank1))
# 2. Remove contaminants (those were on the 2016 data)
all_years_16S_filtered <- all_years_16S_filtered %>%
  subset_taxa(Rank7 != "Pseudomonas_sp._ANT7125"| is.na(Rank7)) %>%
  subset_taxa(Rank7 != "Alcaligenes_faecalis"| is.na(Rank7)) %>%
  subset_taxa(Rank7 != "Pseudomonas_sp._ZJY-246"| is.na(Rank7))
#View(as.data.frame(tax_table(all_years_16S_filtered)))
# FILTERING per sample
# 3. Remove ASVs with less than ~ 2-5 reads in a given sample
#    (counts <= 3 within a sample are zeroed out as likely noise)
otu <- as.data.frame(otu_table(all_years_16S_filtered))
otu_table(all_years_16S_filtered)[otu <= 3] <- 0 # free of noise; threshold set to 3 reads/ASV/sample
otu2 <- as.data.frame(otu_table(all_years_16S_filtered)) # free of noise
# FILTERING overall
# 4. Remove ASVs with less than 250 total reads across the whole dataset
all_years_16S_filtered <- prune_taxa(taxa_sums(all_years_16S_filtered) >= 250, all_years_16S_filtered)
# 5. Remove samples with less than 1000 reads across the whole dataset
all_years_16S_filtered <- prune_samples(sample_sums(all_years_16S_filtered) >= 1000, all_years_16S_filtered)
all_years_16S_filtered
# 6. look at minimum, mean, and maximum sample counts, if desired
smin <-
  min(sample_sums(all_years_16S_filtered))
meanreads <-
  mean(sample_sums(all_years_16S_filtered))
smax <-
  max(sample_sums(all_years_16S_filtered))
totalreads <-
  sum(sample_sums(all_years_16S_filtered))
get_sample(all_years_16S_filtered)
sample_sums(all_years_16S_filtered)
### include metadata (year column and sample_type_growth), and add it to phyloseq object
# NOTE(review): this assumes the CSV rows are in the same order as the
# phyloseq samples -- verify before rerunning on regenerated data.
year_growth_column <- read.csv("Data/prokaryotes/year_growth_column_16S_ALL_YEARS_FILTERED.csv")
nrow(year_growth_column)
sample_data(all_years_16S_filtered)$year<- year_growth_column$year
sample_data(all_years_16S_filtered)$growth <- year_growth_column$growth
# Keep only meso-quadrat surveys of old-growth Zostera samples
all_years_16S_filtered_meso <- all_years_16S_filtered %>% subset_samples(survey_type == "meso_quadrat" | survey_type == "meso_quadrats" )
all_years_16S_filtered_meso_Zos <- all_years_16S_filtered_meso %>% subset_samples(growth =="old")
#View(as.data.frame(otu_table(all_years_16S_filtered_meso_Zos)))
# ## 16S all years rarefied to 3,000
all_years_16S_RAREFIED <- rarefy_even_depth(all_years_16S_filtered_meso_Zos,
                                            sample.size = 3000, # Estimated from rarefaction plot
                                            rngseed = 7, # set seed for reproducibility
                                            replace = FALSE) # sample without replacement; slower but more accurate
# subset samples from 2015, 2017 and 2018 to rarefy only those to 3,000 * 2016 had lower sequencing depth and will be rarefied to a lower level
all_years_16S_filtered_no_2016 <- all_years_16S_filtered_meso_Zos %>% subset_samples(!year=="2016")
all_years_16S_filtered_ONLY_2016 <- all_years_16S_filtered_meso_Zos %>% subset_samples(year=="2016")
# as.data.frame(sample_data(all_years_16S_filtered_ONLY_2016))[["year"]]
##################################################
### rarefying data using coverage based iNEXT  ###
##################################################
# install.packages("remotes")
# remotes::install_github("vmikk/metagMisc")
library(metagMisc)
# phyloseq_coverage_raref(physeq, coverage = NULL, iter = 1, replace = F, correct_singletons = FALSE, seeds = NULL, multithread = F, drop_lowcoverage = F, ...)
# Samples standardized by size will have different degrees of completeness. When we compare samples with the same coverage, we are making sure that samples are equally complete and that the unsampled species constitute the same proportion of the total individuals in each community (Chao, Jost, 2012).
# i.e. a seagrass sample with 10,000 total reads will have a different coverage than a seawater sample with 10,000 reads if seawater samples have many more species
all_years_16S_filtered_meso_Zos
taxa_are_rows(all_years_16S_filtered_meso_Zos)
# transpose so taxa are rows (required by prepare_inext / iNEXT)
otu_table(all_years_16S_filtered_meso_Zos) <- t(otu_table(all_years_16S_filtered_meso_Zos))
taxa_are_rows(all_years_16S_filtered_meso_Zos)
# NOTE(review): prefer TRUE over T (T is reassignable in R)
x <- metagMisc::prepare_inext(
  as.data.frame(otu_table(all_years_16S_filtered_meso_Zos)),
  correct_singletons = T)
# Sample coverage (Chat) per sample; try() so one failure doesn't stop the run
SC <- plyr::llply(.data = x, .fun = function(z){ try( iNEXT:::Chat.Ind(z, sum(z)) ) })
plyr::ldply(.data = SC, .fun = class)
#saveRDS(all_years_16S_filtered_meso_Zos, "/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_years_16S_filtered_meso_Zos_ASV.rds")
all_years_16S_filtered_meso_Zos <- readRDS("/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_years_16S_filtered_meso_Zos_ASV_REMOVED_LOW_COUNT.rds")
# Due to the stochasticity introduced in random subsampling results could be slightly different.
# So you have to average diversity estimates or sample dissimilarities across multiple rarefactions.
# run coverage-based rarefaction (Chao & Jost, 2012) correcting for singletons (Chiu & Chao 2016)
all_16S_COVERAGE_RAREF_200 <- phyloseq_coverage_raref(physeq=all_years_16S_filtered_meso_Zos, coverage = 0.8, iter = 200, replace = F, correct_singletons = TRUE, drop_lowcoverage = F)
saveRDS(all_16S_COVERAGE_RAREF_200, "/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_16S_COVERAGE_RAREF_200_REMOVED_LOW_COUNT.rds")
all_16S_COVERAGE_RAREF_200 <- readRDS("/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_16S_COVERAGE_RAREF_200_REMOVED_LOW_COUNT.rds")
### Average otu tables from all iterations to get a final robust table ###
subset_phylo_objects_200 <- all_16S_COVERAGE_RAREF_200[c(1:200)]
# first, extract otu tables from phyloseq objects
# this is how you do it for a single phyloseq object:
# y <- as.data.frame(t(phyloseq::otu_table(all_16S_COVERAGE_RAREF)))
# now do it for the list of phyloseq objects
otu_tables_200 <- lapply(subset_phylo_objects_200, function(z) as.data.frame(t(phyloseq::otu_table(z))))
# average all matrices to get the mean abundance across all iterations
average_otu_tables_200 <- Reduce("+",otu_tables_200)/length(otu_tables_200)
# IMPORTANT! NEED TO ROUND IT OTHERWISE iNEXT WILL NOT WORK!
# BUG FIX: the original rounded into `average_otu_tables_200`, then assigned a
# SampleID column into `average_otu_tables_200_round` before that object
# existed (a runtime error), and finally wrote out the table WITHOUT the
# SampleID column that the downstream inner_join(by = "SampleID") requires.
# Also replaces the deprecated mutate_at()/funs() with mutate(across()).
average_otu_tables_200_round <- average_otu_tables_200 %>%
  mutate(across(starts_with("ASV"), ~ round(.x, 0)))
# add SampleID column back (sample IDs live in the rownames of the averaged table)
average_otu_tables_200_round$SampleID <- rownames(average_otu_tables_200)
average_otu_tables_200_round <- average_otu_tables_200_round %>%
  select(SampleID, everything())
write.csv(average_otu_tables_200_round, "Data/prokaryotes/prok_average_otu_tables_200_REMOVING_LOW_COUNT.csv", quote=F, row.names=F )
# Now check if different number of iterations yield the same pattern
average_otu_tables_200 <- read.csv("Data/prokaryotes/prok_average_otu_tables_200_REMOVING_LOW_COUNT.csv", header=T)
# Tag rows with the iteration count so tables from different runs can be compared.
average_otu_tables_200$cover_based_iterations <- "twohundred"
average_otu_tables_200 <- average_otu_tables_200 %>%
select(cover_based_iterations, everything())
# join all in a single data frame to make boxplot for alpha diversity
# NOTE(review): average_otu_tables_5/_50/_100 are not created in this section -
# confirm they exist in the session before running this line.
otu_tables_iterations <- bind_rows(average_otu_tables_5, average_otu_tables_50,average_otu_tables_100, average_otu_tables_200)
### add metadata according to #SampleID labels
metadata <- read.csv(file="Data/prokaryotes/EDITED_16S_final_metadata.csv",header=T )
metadata_sel_iter <- metadata %>%
dplyr::select(c(SampleID,swab_id, barcode_plate, barcode_well, year ,region, site, host_species, host_type, sample_type, survey_type, quadrat_id, meso_shoot_id))
# inner join keeps only samples present in both metadata and the OTU tables
master_table_iter <- inner_join(metadata_sel_iter , otu_tables_iterations , by = "SampleID")
View(as.data.frame(master_table_iter ))
##############################################################
### saving Coverage-based rarefaction with more iterations ###
#############################################################
master_table_iter
### Exclude the following samples for analyses: "ZosCSPE", "ZosCSPF" # no info if new or old leaf and ZosCSPoldM and ZosPBSoldD18 which was all NAs
exclude <- c("ZosCSPE", "ZosCSPF")
master_table_iter <- master_table_iter %>%
dplyr::filter(!SampleID %in% exclude)
###recode to site names used by grazers
master_table_iter <- master_table_iter %>%
dplyr::mutate(site=recode(site,
"choked_south_pigu" = "choked_inner",
"choked_flat_island" = "choked_inner",
"mcmullin_north" = "mcmullins_north",
"mcmullin_south" = "mcmullins_south",
"goose_southwest" = "goose_south_west",
"goose_southeast" = "goose_south_east",
"pruth_bay_south" = "pruth_bay",
"pruth_baysouth" = "pruth_bay"))
master_table_iter <- master_table_iter %>%
dplyr::mutate(region=recode(region,
"mcmullin" = "mcmullins"))
# For mastel final table, get only leaf_old
master_table_iter_final <- master_table_iter %>%
dplyr::filter(sample_type =="leaf_old")
# get only meso_quadrat survey
master_table_iter_final <- master_table_iter_final %>%
dplyr::filter(survey_type == "meso_quadrat")
# create a region_year column so can remove only mcmullin 2016 samples
master_table_iter_final <- master_table_iter_final %>%
dplyr::mutate(region_year = paste(region, year, sep = "_"))
# reorganize column orders (get region_year to first columns together with metadata)
master_table_iter_final <- master_table_iter_final %>%
dplyr::select(SampleID, swab_id, barcode_plate, barcode_well, year, region_year, everything())
# remove mcmullin 2016 samples
master_table_iter_final <- master_table_iter_final %>%
dplyr::filter(!region_year == "mcmullins_2016")
#create a unique site_quadrat_id column
master_table_iter_final <- master_table_iter_final %>%
unite(site_quadrat_id, site, quadrat_id, sep = "_" , remove = FALSE) #remove F so it doesn't remove the columns that were combined
View(master_table_iter_final)
master_table_200_iter <- master_table_iter_final %>%
filter(cover_based_iterations == "twohundred")
# NOTE(review): mutate_at()/funs() are deprecated in current dplyr;
# prefer mutate(across(starts_with("ASV"), ~ round(.x, 0))).
master_table_200_iter <- master_table_200_iter %>% mutate_at(vars(starts_with("ASV")), funs(round(., 0)))
write.csv(master_table_200_iter, "Data/R_Code_for_Data_Prep/master_data/MASTER_prokary_ASV_level_200_COVERAGE_RAREF.csv", row.names=F)
|
/Data/prokaryotes/tests/prokary_pipeline_COVERAGE_BASED_REMOVING_LOW_COUNT.R
|
no_license
|
mawhal/Calvert_O-Connor_eelgrass
|
R
| false
| false
| 11,604
|
r
|
### Prokaryotes pipeline phyloseq ###
### Author: Bianca Trevizan Segovia ###
### Date created: November 18, 2019 ###
### Date modified: July 07, 2020 ###
### Date last modified: September 02, 2020 ###
### This code is now updated to remove the contaminants found in the 2016 dataset ###
### Data from 2015, 2017 and 2018 is rarefied to 3,000 reads/sample, and 2016 is not rarefied due to contamination
### Changed the pipeline in taxa filtering steps to avoid removal of other taxa in that rank (i.e. | is.na(Rank5)) and change in the ordering of filtering
### Added coverage-based rarefaction and saved tables to be used in all analyses
library(phyloseq)
library(tidyverse)
library(reshape2)
library(stringr)
library(ape)
library(dplyr)
library(data.table)
#### Importing files ####
all_years_16S_unfiltered <- readRDS("Data/prokaryotes/seagrass_16s.full_dataset.unfiltered.phyloseq_format.RDS")
#### QUALITY FILTERING TAXA DATA ####
# 1. Remove mitochondrial and chloroplast ASVs
# (the "| is.na(RankN)" clauses keep taxa that are unclassified at that rank)
all_years_16S_filtered <- all_years_16S_unfiltered %>%
subset_taxa(Rank5 != "Mitochondria" | is.na(Rank5)) %>%
subset_taxa(Rank3 != "Chloroplastida" | is.na(Rank3)) %>%
subset_taxa(Rank4 != "Chloroplast" | is.na(Rank4)) %>%
subset_taxa(Rank5 != "Chloroplast"| is.na(Rank5)) %>%
subset_taxa(Rank1 != "Unassigned"| is.na(Rank1))
# 2. Remove contaminants (those were on the 2016 data)
all_years_16S_filtered <- all_years_16S_filtered %>%
subset_taxa(Rank7 != "Pseudomonas_sp._ANT7125"| is.na(Rank7)) %>%
subset_taxa(Rank7 != "Alcaligenes_faecalis"| is.na(Rank7)) %>%
subset_taxa(Rank7 != "Pseudomonas_sp._ZJY-246"| is.na(Rank7))
#View(as.data.frame(tax_table(all_years_16S_filtered)))
# FILTERING per sample
# 3. Remove ASVs with less than ~ 2-5 reads in a given sample
otu <- as.data.frame(otu_table(all_years_16S_filtered))
otu_table(all_years_16S_filtered)[otu <= 3] <- 0 #free of noise, I set to 3 asvs/sample
otu2 <- as.data.frame(otu_table(all_years_16S_filtered)) #free of noise
# FILTERING overall
# 4. Remove OTUs with less than N total reads. (N = 250 in example) whole dataset
all_years_16S_filtered <- prune_taxa(taxa_sums(all_years_16S_filtered) >= 250, all_years_16S_filtered)
# 5. Remove samples with less than N reads. (N = 1000 in example) wholw dataset
all_years_16S_filtered <- prune_samples(sample_sums(all_years_16S_filtered) >= 1000, all_years_16S_filtered)
all_years_16S_filtered
# 6. look at minimum, mean, and maximum sample counts, if desired
smin <-
min(sample_sums(all_years_16S_filtered))
meanreads <-
mean(sample_sums(all_years_16S_filtered))
smax <-
max(sample_sums(all_years_16S_filtered))
totalreads <-
sum(sample_sums(all_years_16S_filtered))
get_sample(all_years_16S_filtered)
sample_sums(all_years_16S_filtered)
### include metadata (year column and sample_type_growth), and add it to phyloseq object
year_growth_column <- read.csv("Data/prokaryotes/year_growth_column_16S_ALL_YEARS_FILTERED.csv")
# NOTE(review): the two assignments below assume the CSV rows are in the same
# order as the phyloseq samples - confirm before rerunning.
nrow(year_growth_column)
sample_data(all_years_16S_filtered)$year<- year_growth_column$year
sample_data(all_years_16S_filtered)$growth <- year_growth_column$growth
# Keep only meso quadrat surveys (name varies between years) and "old" leaf growth.
all_years_16S_filtered_meso <- all_years_16S_filtered %>% subset_samples(survey_type == "meso_quadrat" | survey_type == "meso_quadrats" )
all_years_16S_filtered_meso_Zos <- all_years_16S_filtered_meso %>% subset_samples(growth =="old")
#View(as.data.frame(otu_table(all_years_16S_filtered_meso_Zos)))
# ## 16S all years rarefied to 3,000
all_years_16S_RAREFIED <- rarefy_even_depth(all_years_16S_filtered_meso_Zos,
sample.size = 3000, # Estimated from rarefaction plot
rngseed = 7, # set seed for reproducibility
replace = FALSE)# sample without replacement; slower but more accurate
# subset samples from 2015, 2017 and 2018 to rarefy only those to 3,000 * 2016 had lower sequencing depth and will be rarefied to a lower level
# Split samples by year: 2016 had lower sequencing depth and is handled separately.
all_years_16S_filtered_no_2016 <- all_years_16S_filtered_meso_Zos %>% subset_samples(!year=="2016")
all_years_16S_filtered_ONLY_2016 <- all_years_16S_filtered_meso_Zos %>% subset_samples(year=="2016")
# as.data.frame(sample_data(all_years_16S_filtered_ONLY_2016))[["year"]]
##################################################
### rarefying data using coverage based iNEXT ###
##################################################
# install.packages("remotes")
# remotes::install_github("vmikk/metagMisc")
library(metagMisc)
# phyloseq_coverage_raref(physeq, coverage = NULL, iter = 1, replace = F, correct_singletons = FALSE, seeds = NULL, multithread = F, drop_lowcoverage = F, ...)
# Samples standardized by size will have different degrees of completness. When we compare samples with the same coverage, we are making sure that samples are equally complete and that the unsampled species constitute the same proportion of the total individuals in each community (Chao, Jost, 2012).
# i.e. a seagrass sample with 10,000 total reads will have a different coverage than a seawater sample with 10,000 reads if seawater samples have many more species
all_years_16S_filtered_meso_Zos
taxa_are_rows(all_years_16S_filtered_meso_Zos)
# transpose so taxa are rows
otu_table(all_years_16S_filtered_meso_Zos) <- t(otu_table(all_years_16S_filtered_meso_Zos))
taxa_are_rows(all_years_16S_filtered_meso_Zos)
# Build iNEXT-format abundance lists (with singleton correction) to inspect per-sample coverage.
x <- metagMisc::prepare_inext(
as.data.frame(otu_table(all_years_16S_filtered_meso_Zos)),
correct_singletons = T)
# Per-sample coverage estimates (Chat.Ind); wrapped in try() so one failure does not abort the loop.
SC <- plyr::llply(.data = x, .fun = function(z){ try( iNEXT:::Chat.Ind(z, sum(z)) ) })
plyr::ldply(.data = SC, .fun = class)
#saveRDS(all_years_16S_filtered_meso_Zos, "/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_years_16S_filtered_meso_Zos_ASV.rds")
# NOTE(review): absolute user-specific path below - runs only on the original author's machine.
all_years_16S_filtered_meso_Zos <- readRDS("/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_years_16S_filtered_meso_Zos_ASV_REMOVED_LOW_COUNT.rds")
#Due to the stochasticity introduced in random subsampling results could be slightly different.
#So you have to average diversity estimates or sample dissimilarities across multiple rarefactions.
# run coverage-based rarefaction (Chao & Jost, 2012) correcting for singletons (Chiu & Chao 2016)
all_16S_COVERAGE_RAREF_200 <- phyloseq_coverage_raref(physeq=all_years_16S_filtered_meso_Zos, coverage = 0.8, iter = 200, replace = F, correct_singletons = TRUE, drop_lowcoverage = F)
saveRDS(all_16S_COVERAGE_RAREF_200, "/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_16S_COVERAGE_RAREF_200_REMOVED_LOW_COUNT.rds")
all_16S_COVERAGE_RAREF_200 <- readRDS("/Users/bia/PostDoc/projects/Calvert_O-Connor_eelgrass/Data/prokaryotes/all_16S_COVERAGE_RAREF_200_REMOVED_LOW_COUNT.rds")
### Average otu tables from all iterations to get a final robust table ###
subset_phylo_objects_200 <- all_16S_COVERAGE_RAREF_200[c(1:200)]
# first, extract otu tables from phyloseq objects
# this is how you do it for a single phyloseq object:
# y <- as.data.frame(t(phyloseq::otu_table(all_16S_COVERAGE_RAREF)))
# now do it for the list of phyloseq objects
otu_tables_200 <- lapply(subset_phylo_objects_200, function(z) as.data.frame(t(phyloseq::otu_table(z))))
# average all matrices to get the mean abundance across all iterations
average_otu_tables_200 <- Reduce("+",otu_tables_200)/length(otu_tables_200)
# IMPORTANT! NEED TO ROUND IT OTHERWISE iNEXT WILL NOT WORK!
# BUG FIX: the original rounded into `average_otu_tables_200`, then assigned a
# SampleID column into `average_otu_tables_200_round` before that object
# existed (a runtime error), and finally wrote out the table WITHOUT the
# SampleID column that the downstream inner_join(by = "SampleID") requires.
# Also replaces the deprecated mutate_at()/funs() with mutate(across()).
average_otu_tables_200_round <- average_otu_tables_200 %>%
  mutate(across(starts_with("ASV"), ~ round(.x, 0)))
# add SampleID column back (sample IDs live in the rownames of the averaged table)
average_otu_tables_200_round$SampleID <- rownames(average_otu_tables_200)
average_otu_tables_200_round <- average_otu_tables_200_round %>%
  select(SampleID, everything())
write.csv(average_otu_tables_200_round, "Data/prokaryotes/prok_average_otu_tables_200_REMOVING_LOW_COUNT.csv", quote=F, row.names=F )
# Now check if different number of iterations yield the same pattern
average_otu_tables_200 <- read.csv("Data/prokaryotes/prok_average_otu_tables_200_REMOVING_LOW_COUNT.csv", header=T)
# Tag rows with the iteration count so tables from different runs can be compared.
average_otu_tables_200$cover_based_iterations <- "twohundred"
average_otu_tables_200 <- average_otu_tables_200 %>%
select(cover_based_iterations, everything())
# join all in a single data frame to make boxplot for alpha diversity
# NOTE(review): average_otu_tables_5/_50/_100 are not created in this section -
# confirm they exist in the session before running this line.
otu_tables_iterations <- bind_rows(average_otu_tables_5, average_otu_tables_50,average_otu_tables_100, average_otu_tables_200)
### add metadata according to #SampleID labels
metadata <- read.csv(file="Data/prokaryotes/EDITED_16S_final_metadata.csv",header=T )
metadata_sel_iter <- metadata %>%
dplyr::select(c(SampleID,swab_id, barcode_plate, barcode_well, year ,region, site, host_species, host_type, sample_type, survey_type, quadrat_id, meso_shoot_id))
# inner join keeps only samples present in both metadata and the OTU tables
master_table_iter <- inner_join(metadata_sel_iter , otu_tables_iterations , by = "SampleID")
View(as.data.frame(master_table_iter ))
##############################################################
### saving Coverage-based rarefaction with more iterations ###
#############################################################
master_table_iter
### Exclude the following samples for analyses: "ZosCSPE", "ZosCSPF" # no info if new or old leaf and ZosCSPoldM and ZosPBSoldD18 which was all NAs
exclude <- c("ZosCSPE", "ZosCSPF")
master_table_iter <- master_table_iter %>%
dplyr::filter(!SampleID %in% exclude)
###recode to site names used by grazers
master_table_iter <- master_table_iter %>%
dplyr::mutate(site=recode(site,
"choked_south_pigu" = "choked_inner",
"choked_flat_island" = "choked_inner",
"mcmullin_north" = "mcmullins_north",
"mcmullin_south" = "mcmullins_south",
"goose_southwest" = "goose_south_west",
"goose_southeast" = "goose_south_east",
"pruth_bay_south" = "pruth_bay",
"pruth_baysouth" = "pruth_bay"))
master_table_iter <- master_table_iter %>%
dplyr::mutate(region=recode(region,
"mcmullin" = "mcmullins"))
# For mastel final table, get only leaf_old
master_table_iter_final <- master_table_iter %>%
dplyr::filter(sample_type =="leaf_old")
# get only meso_quadrat survey
master_table_iter_final <- master_table_iter_final %>%
dplyr::filter(survey_type == "meso_quadrat")
# create a region_year column so can remove only mcmullin 2016 samples
master_table_iter_final <- master_table_iter_final %>%
dplyr::mutate(region_year = paste(region, year, sep = "_"))
# reorganize column orders (get region_year to first columns together with metadata)
master_table_iter_final <- master_table_iter_final %>%
dplyr::select(SampleID, swab_id, barcode_plate, barcode_well, year, region_year, everything())
# remove mcmullin 2016 samples
master_table_iter_final <- master_table_iter_final %>%
dplyr::filter(!region_year == "mcmullins_2016")
#create a unique site_quadrat_id column
master_table_iter_final <- master_table_iter_final %>%
unite(site_quadrat_id, site, quadrat_id, sep = "_" , remove = FALSE) #remove F so it doesn't remove the columns that were combined
View(master_table_iter_final)
master_table_200_iter <- master_table_iter_final %>%
filter(cover_based_iterations == "twohundred")
# NOTE(review): mutate_at()/funs() are deprecated in current dplyr;
# prefer mutate(across(starts_with("ASV"), ~ round(.x, 0))).
master_table_200_iter <- master_table_200_iter %>% mutate_at(vars(starts_with("ASV")), funs(round(., 0)))
write.csv(master_table_200_iter, "Data/R_Code_for_Data_Prep/master_data/MASTER_prokary_ASV_level_200_COVERAGE_RAREF.csv", row.names=F)
|
#! /bin/env Rscript
library(Biostrings)
library(ggplot2)
library(tidyverse)
library(openxlsx)
library(venneuler)
library(getopt)
# Project directory layout.
# NOTE(review): hard-coded absolute path + setwd() make this script
# machine-specific; consider relative paths or here::here().
current_dir = "/home/jwen/projects/qi/elba/code_NC/"
setwd(current_dir)
data_dir = paste(current_dir, "data/",sep="")
res_dir = paste(current_dir, "res/",sep="")
peak_dir = paste(data_dir, "chipseq_peaks/",sep="")
############################################################################################################
############################################## function #######################################
# Write the first six BED columns of a peak table to <bn_dir>/<nm>_<dist>.bed,
# convert it to bigBed, and optionally extract stranded FASTA sequence.
#
#   bn_dir  - output directory
#   dnp     - data frame of peaks; columns 1:6 must be BED6
#             (chr, start, end, name, score, strand)
#   nm      - base name for the output files
#   dist    - distance tag used in the file names; default "given" matches the
#             mergePeaks runs elsewhere in this script (the commented call
#             sites omit it) - TODO confirm the intended default
#   dofasta - also extract FASTA and rewrap it (needs Biostrings)?
#
# Relies on globals `genome_f` (chrom sizes) and `genome_fasta`, and on the
# external tools bedToBigBed and fastaFromBed being on PATH.
create_beds <- function(bn_dir, dnp, nm, dist = "given", dofasta = FALSE) {
  outf = paste(bn_dir, "/", nm, "_", dist, ".bed", sep="")
  unlink(outf)
  # BUG FIX: the original wrote the global `dbed` instead of the `dnp` argument.
  write.table(dnp[, 1:6], file=outf, row.names = F, col.names=F, quote = F, sep="\t", append = F)
  outf_bb = paste(bn_dir, "/", nm, "_", dist, ".bb", sep="")
  system(paste("bedToBigBed ", outf, " ", genome_f, " ", outf_bb, sep=""))
  if (dofasta) {
    outfasta = paste(bn_dir, "/", nm, "_", dist, ".fasta", sep="")
    system(paste("fastaFromBed -fi ", genome_fasta, " -name -s -bed ", outf, " -fo ", outfasta, sep="" ))
    ffseq = readDNAStringSet(outfasta)
    # strip the "(...)" coordinate suffix fastaFromBed appends to sequence names
    names(ffseq) = gsub("\\((.*)", "", names(ffseq), perl=T)
    writeXStringSet(ffseq, file=outfasta, width=20000)
  }
}
# Run MEME-suite FIMO motif scanning of `motif_f` against the sequences in
# `infile`, writing results under the global `fimo_dir` in a subdirectory `nm`.
# Requires the `fimo` executable on PATH.
call_fimo <- function(infile, motif_f, nm) {
  out_path <- paste0(fimo_dir, nm)
  fimo_cmd <- paste0("fimo --parse-genomic-coord --oc ", out_path, " ", motif_f, " ", infile)
  system(fimo_cmd)
  # mast [options] <motif file> <sequence file>
}
# Symlink each sample's narrowPeak BED into the current directory, then run
# HOMER mergePeaks over all of them, writing the merged peak table and a venn
# overlap summary. Assumes the working directory is a subdirectory of peak_dir
# (BEDs are looked up via "../<indir>") and mergePeaks is on PATH.
#   indirs - MACS output directory names to merge
#   dist   - mergePeaks -d argument (e.g. "given")
#   nm     - basename prefix for the output files
mergeset <- function (indirs, dist, nm) {
lnfiles = NULL
for (jj in indirs) {
cat(jj,"\n")
infile = list.files(path=paste("../",jj,sep=""),pattern = "(.*)_narrowPeak.bed", full=T)
# only link when exactly one narrowPeak BED is found in the directory
if (length(infile) == 1) {
lnfile = gsub("diff__|_q20|_narrowPeak.bed", "", basename(infile), perl=T)
system(paste("ln -sf ", infile, " ",lnfile, sep=""))
lnfiles = c(lnfiles, lnfile)
}
}
lnfiles_str = paste(sort(lnfiles,decreasing=T), sep="", collapse=" ")
outf = paste(nm, "_",dist,".txt", sep="")
vennf = paste(nm, "_",dist,"_venn.txt", sep="")
# NOTE(review): vennxls is built but never used.
vennxls = paste(nm, "_",dist,"_venn.xls", sep="")
system(paste("mergePeaks -d ", dist, " ", lnfiles_str ," -venn ",vennf, " > ", outf,sep=""))
}
# Annotate a mergePeaks output table with HOMER (annotatePeaks.pl, dm3 genome),
# join per-factor peak/motif information from the precomputed `peak_l_uni`
# RData, and build two "supertables":
#   Dsuper  - one row per merged peak with motif hits, genomic-region flags and
#             per-comparison max peak scores
#   Dsuper2 - the same peaks with HOMER gene annotation and a collapsed
#             motifs/motifsCombine classification
# plus `flist`/`dm_peak` (merged-peak membership per factor). Everything is
# saved to `savef`.
# NOTE(review): the `outdir` and `dist` arguments are never used; the peak file
# name is hard-coded to "<nm>_given.txt".
mergeset_anno <- function (indirs, outdir,dist="given", nm, savef) {
peakfile = paste(nm, "_given.txt",sep="")
dm = read.delim(peakfile,stringsAsFactors=F)
colnames(dm)[1] = c("mergedPeakId")
# NOTE(review): append = T re-appends the whole (header-less) table to the
# file it was just read from, duplicating its content on every run - this
# looks like it should be append = F (rewrite with the renamed column).
write.table(dm,file=peakfile,row.names = F,col.names=F, quote = F,sep="\t",append = T)
dm = dm %>% mutate(coord=paste(chr,":",start,"-", end,sep=""))
# NOTE(review): the "." in these gsub patterns is an unescaped regex wildcard;
# it works here but "\\.txt" would be safer.
peakannofile = gsub(".txt", "_annot.txt", peakfile, perl=T)
statfile = gsub(".txt", "_stat.txt", peakfile, perl=T)
GO_dir = paste("GO/",sep="")
GenomeOntology_dir = paste("GenomeOntology/",sep="")
# HOMER annotation: gene/GO/genome-ontology output plus an annotation stats file
system(paste("annotatePeaks.pl ", peakfile," dm3 -go ", GO_dir, " -genomeOntology ",GenomeOntology_dir, " -annStats ", statfile, " > ", peakannofile,sep=""))
# provides `peak_l_uni`: per-comparison annotated peak tables
load(file = paste(data_dir,"diffpeaks_anno_q20TRUE_spikeinFALSE_correctedMotif.RData",sep=""))
selected = intersect(names(peak_l_uni), indirs)
Dsuper1 = NULL
Dsuper2 = select(dm, mergedPeakId) %>% distinct()
# For each selected comparison: expand the comma-separated sub-peak ids of each
# merged peak, join per-peak motif/annotation columns, and accumulate.
for (bn in selected) {
bn2 = gsub("_q20|diff__","",bn,perl=T)
ddi = peak_l_uni[[bn]]
ddi = ddi %>% dplyr::select(peakid, nearestTSS,peakscore, insv_CCAATTGG,insv_CCAATAAG, CP190, GAGA, BEAF32,EBox, SuH, matches("dist2NearestTSS|TSS_up2k|five_prime_UTR|three_prime_UTR|CDS|intron")) %>% distinct()
ddi[is.na(ddi)] = ""
dm2_i = dm[, regexpr(paste("mergedPeakId|",bn2,sep="",collapse=""), colnames(dm),perl=T) !=-1]
colnames(dm2_i) = c("mergedPeakId", "peakid")
# number of sub-peaks = number of commas after appending a trailing ","
npeaks = sapply(str_extract_all(paste(dm2_i$peakid,",",sep=""), ","), length)
dm2_i = mutate(dm2_i, npeaks=npeaks)
# one row per sub-peak id
dm2_i = data.frame(mergedPeakId=dm2_i[rep(1:nrow(dm2_i), dm2_i$npeaks),"mergedPeakId"], peakid = unlist(strsplit(paste(dm2_i$peakid,",",sep=""),split=",")),stringsAsFactors=F )
dm2_i = dm2_i %>% inner_join(ddi)
dm2_i1 = dm2_i %>% dplyr::select( mergedPeakId,nearestTSS, insv_CCAATTGG, insv_CCAATAAG, CP190,GAGA, BEAF32,EBox, SuH, matches("dist2NearestTSS|TSS_up2k|five_prime_UTR|three_prime_UTR|CDS|intron"))
dm2_i2 = dm2_i %>% dplyr::select(mergedPeakId,peakid, peakscore) %>% distinct()
dm2_i2 = dm2_i2 %>% group_by(mergedPeakId) %>% summarise(peakid=paste(unique(peakid), collapse=";",sep=""), maxPeakscore=max(peakscore,na.rm = TRUE))
colnames(dm2_i2 )[2:ncol(dm2_i2 )] = paste(bn2,".",colnames(dm2_i2 )[2:ncol(dm2_i2 )],sep="" )
if (is.null (Dsuper1) ) {
Dsuper1 = dm2_i1
} else {
Dsuper1 = bind_rows(Dsuper1, dm2_i1)
}
Dsuper2 = Dsuper2 %>% left_join(dm2_i2)
}
# Collapse motif/annotation columns to one ";"-joined string per merged peak.
Dsuper1 = Dsuper1 %>% distinct() %>% group_by(mergedPeakId) %>% summarise(nearestTSS=first(nearestTSS),
insv_CCAATTGG=paste(unique(insv_CCAATTGG), collapse=";",sep=""), insv_CCAATAAG=paste(unique(insv_CCAATAAG), collapse=";",sep=""),
CP190=paste(unique(CP190), collapse=";",sep=""), GAGA=paste(unique(GAGA), collapse=";",sep=""),
BEAF32=paste(unique(BEAF32), collapse=";",sep=""), EBox=paste(unique(EBox), collapse=";",sep=""), SuH=paste(unique(SuH), collapse=";",sep=""),
TSS_up2k=paste(unique(TSS_up2k), collapse=";",sep=""),five_prime_UTR=paste(unique(five_prime_UTR), collapse=";",sep=""),
three_prime_UTR=paste(unique(three_prime_UTR), collapse=";",sep=""),CDS=paste(unique(CDS), collapse=";",sep=""),
intron=paste(unique(intron), collapse=";",sep=""))
# strip leading/trailing ";" left by empty entries
Dsuper1 = Dsuper1 %>% mutate(insv_CCAATTGG = gsub("^;|;$","",insv_CCAATTGG,perl=T)) %>% mutate(insv_CCAATAAG=gsub("^;|;$","",insv_CCAATAAG,perl=T)) %>%
mutate(CP190=gsub("^;|;$","",CP190,perl=T)) %>% mutate(GAGA=gsub("^;|;$","",GAGA,perl=T)) %>%
mutate(BEAF32=gsub("^;|;$","",BEAF32,perl=T)) %>% mutate(EBox=gsub("^;|;$","",EBox,perl=T)) %>% mutate(SuH=gsub("^;|;$","",SuH,perl=T)) %>%
mutate(TSS_up2k=gsub("^;|;$","",TSS_up2k,perl=T), five_prime_UTR=gsub("^;|;$","",five_prime_UTR,perl=T),three_prime_UTR=gsub("^;|;$","",three_prime_UTR,perl=T)) %>%
mutate(CDS=gsub("^;|;$","",CDS,perl=T), intron=gsub("^;|;$","",intron,perl=T))
# maxScore = max of the four wt-vs-cognate-mutant per-comparison max scores
Dsuper = select(dm, mergedPeakId, coord, Total.subpeaks) %>% distinct() %>% inner_join(Dsuper1) %>% inner_join(Dsuper2) %>%
mutate(maxScore=apply(cbind(wtElba1__elba1Elba1.maxPeakscore, wtElba2__elba2Elba2.maxPeakscore, wtElba3__elba3Elba3.maxPeakscore, wtInsv__insvInsv.maxPeakscore), 1, max, na.rm = TRUE)) %>%
arrange(desc(maxScore))
# read the HOMER annotation back in and attach gene/region columns
dd = read.delim(peakannofile, stringsAsFactors=F, header=T)
colnames(dd)[1] = "PeakID"
dd$Focus.Ratio.Region.Size = gsub("_peaks.txt","", dd$Focus.Ratio.Region.Size)
dd = dd %>% filter(PeakID %in% Dsuper$mergedPeakId) %>%
mutate(Annotation=gsub(" UTR", "UTR", Annotation)) %>%
mutate(genomic_region=gsub("\\s+(.*)", "", Annotation)) %>%
arrange(desc(Peak.Score)) %>%
select(PeakID, Gene.Name, genomic_region, Distance.to.TSS)
Dsuper2 = Dsuper %>% select(mergedPeakId, coord, insv_CCAATTGG, insv_CCAATAAG, CP190, GAGA,BEAF32,EBox, SuH, matches("peakid"),maxScore)%>%
left_join(dd, by=c("mergedPeakId"="PeakID")) %>%
select(mergedPeakId, coord, genomic_region, Gene.Name, Distance.to.TSS, everything())
# motifs: collapsed combination string; motifsCombine: first matching category
Dsuper2 = Dsuper2 %>% mutate(motifs = paste(insv_CCAATTGG,insv_CCAATAAG,CP190,GAGA,sep=";"))
Dsuper2 = Dsuper2 %>% mutate(motifs=gsub("\\;{2,}",";", motifs)) %>% mutate(motifs=gsub("^\\;{1,}","", motifs)) %>% mutate(motifs=gsub("\\;{1,}$","", motifs))
Dsuper2[Dsuper2$motifs %in% "", "motifs"] = "nomotifs"
Dsuper2 = Dsuper2 %>% mutate(motifsCombine = "")
Dsuper2[grepl("insv_CCAATTGG;insv_CCAATAAG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATTGG;insv_CCAATAAG"
Dsuper2[grepl("insv_CCAATTGG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATTGG"
Dsuper2[grepl("insv_CCAATAAG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATAAG"
table(Dsuper2$motifsCombine)
# membership lists: which merged peaks contain a sub-peak from each comparison
dm = dm %>% select(matches("mergedPeakId|Elba|Insv"))
dm_peak = NULL
flist = list()
for (ii in 2:ncol(dm)) {
bn = colnames(dm)[ii]
dm_i = dm[, c("mergedPeakId", bn)]
colnames(dm_i) = c("mergedPeakId", "peak")
dm_i = dm_i %>% filter(peak != "")
flist[[bn]] = unique(dm_i$mergedPeakId)
dm_peak_i = data.frame(mergedPeakId=unique(dm_i$mergedPeakId), factor_motif=bn, stringsAsFactors=F)
dm_peak = rbind(dm_peak, dm_peak_i)
}
# save(flist,dm_peak,file=paste(RData_dir,"Elba_merge_all_mutant_peaks.RData",sep="") )
save(Dsuper,Dsuper2,dd,flist,dm_peak,file=savef)
}
############################################################################################################
################################################# merge diff peak ##########################################
setwd(peak_dir)
macs_dirs = list.dirs(path = ".", full.names = FALSE, recursive = FALSE)
######## 4 major binding sites: wt vs cognate mutant ########
indirs = macs_dirs[grepl("wtElba1_q20__elba1Elba1|wtElba2_q20__elba2Elba2|wtElba3_q20__elba3Elba3|wtInsv_q20__insvInsv",macs_dirs)]
outdir = paste("merge__major", sep="")
dir.create(outdir)
setwd(outdir)
mergeset(indirs, dist="given", nm="merge_major")
mergeset_anno(indirs, outdir,dist="given", nm="merge_major", savef = paste(data_dir, "chip_supertable_with_RNAseq_correctedMotif.RData",sep=""))
######## wt+mutant vs cognate mutant: 16 ########
setwd(peak_dir)
indirs = macs_dirs[grepl("Elba1_q20__elba1Elba1|Elba3_q20__elba3Elba3|Elba2_q20__elba2Elba2|Insv_q20__insvInsv",macs_dirs)]
outdir = paste("merge__major_ElbaAntibody", sep="")
dir.create(outdir)
setwd(outdir)
mergeset(indirs, dist="given", nm="mergemajor_ElbaAntibody")
mergeset_anno(indirs, outdir,dist="given", nm="mergemajor_ElbaAntibody", savef = paste(data_dir, "chip_ElbaAntibody_supertable_with_RNAseq_correctedMotif.RData",sep=""))
############################################################################################################
################################################# chipseq subsets ##########################################
# provides Dsuper2 (and friends) saved by mergeset_anno above
load(file=paste(data_dir, "chip_ElbaAntibody_supertable_with_RNAseq_correctedMotif.RData",sep=""))
# -Inf maxScore arises when every per-comparison score was NA; drop those peaks
Dsuper2 = Dsuper2[!is.infinite(Dsuper2$maxScore), ]
# Named list of merged-peak subsets; a non-NA "<comparison>.peakid" column
# means the merged peak contains a sub-peak from that comparison.
chip_subsets = list()
########################### Wt major ###########################
chip_subsets[["wtElba1"]] = Dsuper2 %>% filter(!is.na(wtElba1__elba1Elba1.peakid))
chip_subsets[["wtElba2"]] = Dsuper2 %>% filter(!is.na(wtElba2__elba2Elba2.peakid))
chip_subsets[["wtElba3"]] = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid))
chip_subsets[["wtInsv"]] = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid))
########################### Elba3 #############################
elba3_elba12dep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(elba1Elba3__elba3Elba3.peakid) & is.na(elba2Elba3__elba3Elba3.peakid))
elba3_elba12indep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid) | !is.na(elba2Elba3__elba3Elba3.peakid)))
elba12_gained = Dsuper2 %>% filter(is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid) | !is.na(elba2Elba3__elba3Elba3.peakid)))
elba1_gained = Dsuper2 %>% filter(is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid)))
elba1_indep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid)))
chip_subsets[["elba3_elba12indep"]] = elba3_elba12indep
chip_subsets[["elba3_elba12dep"]] = elba3_elba12dep
chip_subsets[["elba12_gained"]] =elba12_gained
chip_subsets[["elba1_gained"]] = elba1_gained
chip_subsets[["elba1_indep"]] = elba1_indep
########################### Elba1 #############################
elba1_wt = Dsuper2 %>% filter((!is.na(wtElba1__elba1Elba1.peakid)))
elba1_elba2mut = Dsuper2 %>% filter((!is.na(elba2Elba1__elba1Elba1.peakid)) )
elba3_elba2mut = Dsuper2 %>% filter((!is.na(elba2Elba3__elba3Elba3.peakid)))
chip_subsets[["elba1_wt"]] = elba1_wt
chip_subsets[["elba1_elba2mut"]] = elba1_elba2mut
chip_subsets[["elba3_elba2mut"]] = elba3_elba2mut
########################### Wt ################################
elba3_wt = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(wtInsv__insvInsv.peakid))
insv_wt = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & is.na(wtElba3__elba3Elba3.peakid))
elba3_insv_wt = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & !is.na(wtInsv__insvInsv.peakid))
chip_subsets[["elba3_wt_noinsv"]] = elba3_wt
chip_subsets[["insv_wt_noElba3"]] = insv_wt
chip_subsets[["elba3_insv_wt"]] = elba3_insv_wt
########################### Elba3-only, insv-only, elba1/2/3 overlap, 4 factor overlap ###########################
elba3_elba12dep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(elba1Elba3__elba3Elba3.peakid) & is.na(elba2Elba3__elba3Elba3.peakid))
elba3_only = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtInsv__insvInsv.peakid) )
insv_only = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtElba3__elba3Elba3.peakid) )
insv_nonunique = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & !mergedPeakId %in% insv_only$mergedPeakId)
elba123_ovlp = Dsuper2 %>% filter(!is.na(wtElba1__elba1Elba1.peakid) & !is.na(wtElba2__elba2Elba2.peakid) & !is.na(wtElba3__elba3Elba3.peakid) )
elba3.only_elba12dep_ovlp = elba3_elba12dep %>% filter(wtElba3__elba3Elba3.peakid %in% elba3_only$wtElba3__elba3Elba3.peakid)
elba123_ovlp.ovlp_insv = elba123_ovlp %>% filter(!is.na(wtInsv__insvInsv.peakid) )
elba123_ovlp.notovlp_insv = elba123_ovlp %>% filter( is.na(wtInsv__insvInsv.peakid) )
elba3insv_noelba12 = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & !is.na(wtInsv__insvInsv.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) )
elba13_noelba2_noinsv = Dsuper2 %>% filter((!is.na(wtElba3__elba3Elba3.peakid) | is.na(wtElba1__elba1Elba1.peakid)) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtInsv__insvInsv.peakid) )
chip_subsets[["elba3_only"]] = elba3_only
chip_subsets[["insv_only"]] = insv_only
chip_subsets[["insv_nonunique"]] = insv_nonunique
chip_subsets[["elba123_ovlp"]] = elba123_ovlp
chip_subsets[["elba123_ovlp.ovlp_insv"]] = elba123_ovlp.ovlp_insv
chip_subsets[["elba123_ovlp.notovlp_insv"]] = elba123_ovlp.notovlp_insv
chip_subsets[["elba3.only_elba12dep_ovlp"]] = elba3.only_elba12dep_ovlp
chip_subsets[["elba3insv_noelba12"]] = elba3insv_noelba12
chip_subsets[["elba13_noelba2_noinsv"]] = elba13_noelba2_noinsv
save(chip_subsets, file =paste(data_dir,"chip_elba_supertable_subsets.RData",sep=""))
########################################################################################################################################
######################### chipseq subsets: top 200 peaks with motif, peak with/without motifs ##########################################
# Split one or more ";"-delimited gene strings into individual gene names and
# return the unique names, in first-seen order.
get_genes <- function(genes) {
  unique(unlist(strsplit(genes, split = ";")))
}
# Build named gene sets from per-comparison peak tables (`peak_l_uni`).
# For every "q20" comparison it keeps peaks within 2 kb of the nearest TSS and
# returns, per comparison: genes near insv-motif peaks (all, top100, top200 by
# peak score), genes near peaks without an insv motif, and genes per peak-score
# quintile. Returns a named list of character vectors (also printed lengths).
get_myset <-function (peak_l_uni) {
# peak_l_uni = diffpeak_peak_l_uni
mysets = list()
# only the q20 (quality-filtered) comparisons
bns = names(peak_l_uni)[regexpr("q20", names(peak_l_uni), perl=T)!=-1 ]
for (bn in bns) {
dpeak0 = peak_l_uni[[bn]]
colnames(dpeak0)[colnames(dpeak0) == "PeakID"] = "peakid"
bn2 = gsub("_q20","", bn, perl=T)
dpeak = dpeak0 %>% arrange(desc(peakscore)) %>% dplyr::select(peakid, nearestTSS,dist2NearestTSS, peakscore, starts_with("insv"),"GAGA", "CP190" )
# motif-based partitions; empty string means no motif hit
dpeak_insv = filter(dpeak, (insv_CCAATTGG != "" | insv_CCAATAAG != ""))
dpeak_insv_CCAATTGG = filter(dpeak, (insv_CCAATTGG != "" & insv_CCAATAAG == ""))
dpeak_insv_CCAATAAG = filter(dpeak, (insv_CCAATAAG != "" & insv_CCAATTGG == "" ))
dpeak_insvNOT = filter(dpeak, (insv_CCAATTGG == "" & insv_CCAATAAG == ""))
dpeak_CP190 = filter(dpeak, (CP190 != "" & (insv_CCAATTGG == "" & insv_CCAATAAG == "")))
dpeak_GAGA = filter(dpeak, (GAGA != "" & (insv_CCAATTGG == "" & insv_CCAATAAG == "")))
dpeak_insv_GAGA = filter(dpeak, (GAGA != "" & (insv_CCAATTGG != "" | insv_CCAATAAG != "")))
# restrict to peaks within 2 kb of the nearest TSS
dpeak_tss2k = dpeak[abs(dpeak$dist2NearestTSS) <= 2000, ]
dyy0 = dpeak0 %>% filter(peakid %in% dpeak_tss2k$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy0, nm = "insv")
dpeak_tss2k_insv = dpeak_insv %>% filter(peakid %in% dpeak_tss2k$peakid) %>% mutate(ntiles = ntile(peakscore,5))
# NOTE(review): [1:200,] pads with NA rows when fewer than 200 peaks exist;
# head(., 200) would be safer.
dpeak_tss2k_insv_top200 = dpeak_tss2k_insv[1:200,]
dyy1 = dpeak0 %>% filter(peakid %in% dpeak_tss2k_insv_top200$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy1, nm = "insv_top200")
dpeak_tss2k_insv_top100 = dpeak_tss2k_insv[1:100,]
dpeak_tss2k_insvNOT = filter(dpeak_insvNOT, peakid %in% dpeak_tss2k$peakid)
dyy2 = dpeak0 %>% filter(peakid %in% dpeak_tss2k_insvNOT$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy2, nm = "insvNOT")
# NOTE(review): the four subsets below are computed but never added to mysets.
dpeak_tss2k_insv_CCAATTGG = filter(dpeak_insv_CCAATTGG, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_insv_CCAATAAG = filter(dpeak_insv_CCAATAAG, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_CP190 = filter(dpeak_CP190, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_GAGA = filter(dpeak_GAGA, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_insv_GAGA = filter(dpeak_insv_GAGA, peakid %in% dpeak_tss2k$peakid)
mysets[[paste(bn2, "__insv_tss2k", sep="")]] = get_genes(dpeak_tss2k_insv$nearestTSS)
mysets[[paste(bn2, "__insv_tss2k_top100", sep="")]] = get_genes(dpeak_tss2k_insv_top100$nearestTSS)
mysets[[paste(bn2, "__insv_tss2k_top200", sep="")]] = get_genes(dpeak_tss2k_insv_top200$nearestTSS)
mysets[[paste(bn2, "__insvNOT_tss2k", sep="")]] = get_genes(dpeak_tss2k_insvNOT$nearestTSS)
# per peak-score quintile gene sets (1 = lowest scores, 5 = highest)
for (jj in 1:5) {
xx = dpeak_tss2k_insv %>% filter(ntiles == jj)
mysets[[paste(bn2, "__insv_tss2k_",jj,"tile", sep="")]] = get_genes(xx$nearestTSS)
}
}
print(lapply(mysets, length))
mysets
}
# Load the annotated differential-peak tables (provides `peak_l_uni`).
load( file = paste(data_dir,"diffpeaks_anno_q20TRUE_spikeinFALSE_correctedMotif.RData",sep=""))
diffpeak_peak_l_uni = peak_l_uni
# Strip prefixes/tags and genotype tokens from the list names so that
# "wtX__mutantX" comparisons reduce to identical left/right halves ...
ww = names(diffpeak_peak_l_uni)
ww = gsub("diff__|_q20", "", ww, perl=T)
ww = gsub("elba\\d+|insv|wt", "",ww, perl=T)
ww1 = gsub("(.*)__(.*)", "\\1",ww, perl=T)
ww2 = gsub("(.*)__(.*)", "\\2",ww, perl=T)
# ... and keep only comparisons of a factor against its cognate mutant.
diffpeak_peak_l_uni = diffpeak_peak_l_uni[names(diffpeak_peak_l_uni)[which(ww1 == ww2)]]
# Restrict to the four wild-type-vs-cognate-mutant comparisons of interest.
diffpeak_peak_l_uni = diffpeak_peak_l_uni[names(diffpeak_peak_l_uni) %in% c("diff__wtElba1_q20__elba1Elba1_q20","diff__wtElba2_q20__elba2Elba2_q20","diff__wtElba3_q20__elba3Elba3_q20","diff__wtInsv_q20__insvInsv_q20")]
diffpeak_sets = get_myset(diffpeak_peak_l_uni)
names(diffpeak_sets) = gsub("diff__", "", names(diffpeak_sets), perl=T)
# NOTE(review): the nested `paste(file=paste(...))` is redundant -- it simply
# evaluates to the inner path string -- but harmless.
save(diffpeak_sets, file = paste(file=paste(data_dir, "genesets2.RData",sep="")))
|
/scripts/chipseq_merge_peaksets.R
|
no_license
|
jiayuwen/ELBA
|
R
| false
| false
| 18,904
|
r
|
#! /bin/env Rscript
library(Biostrings)
library(ggplot2)
library(tidyverse)
library(openxlsx)
library(venneuler)
library(getopt)
current_dir = "/home/jwen/projects/qi/elba/code_NC/"
setwd(current_dir)
data_dir = paste(current_dir, "data/",sep="")
res_dir = paste(current_dir, "res/",sep="")
peak_dir = paste(data_dir, "chipseq_peaks/",sep="")
############################################################################################################
############################################## function #######################################
# Write the first six columns of a peak table to a BED file, convert it to
# bigBed, and optionally extract strand-aware FASTA sequences for the peaks.
#
# bn_dir:  output directory (one per comparison/sample).
# dnp:     data.frame of peaks; columns 1-6 must be BED fields.
# nm:      basename tag for the output files.
# dist:    distance tag appended to the output file names.
# dofasta: also run fastaFromBed and clean up the sequence names?
#
# Relies on file-level globals `genome_f` (chrom sizes for bedToBigBed) and
# `genome_fasta` (reference FASTA) -- neither is defined in this script;
# presumably set before this function is called (TODO confirm). Also requires
# the external tools bedToBigBed and fastaFromBed on the PATH, and Biostrings
# for readDNAStringSet/writeXStringSet. Called for its side effects.
create_beds <- function(bn_dir, dnp, nm, dist, dofasta = FALSE) {
  outf = paste(bn_dir, "/", nm, "_", dist, ".bed", sep = "")
  unlink(outf)
  # BUG FIX: the original wrote `dbed[,1:6]`, but `dbed` is undefined -- the
  # peak table is the `dnp` parameter.
  write.table(dnp[, 1:6], file = outf, row.names = F, col.names = F, quote = F, sep = "\t", append = F)
  outf_bb = paste(bn_dir, "/", nm, "_", dist, ".bb", sep = "")
  system(paste("bedToBigBed ", outf, " ", genome_f, " ", outf_bb, sep = ""))
  if (dofasta) {
    outfasta = paste(bn_dir, "/", nm, "_", dist, ".fasta", sep = "")
    system(paste("fastaFromBed -fi ", genome_fasta, " -name -s -bed ", outf, " -fo ", outfasta, sep = ""))
    # Strip the "(strand)" suffix fastaFromBed appends to each sequence name,
    # then rewrite the FASTA with long line width.
    ffseq = readDNAStringSet(outfasta)
    names(ffseq) = gsub("\\((.*)", "", names(ffseq), perl = T)
    writeXStringSet(ffseq, file = outfasta, width = 20000)
  }
}
# Run MEME-suite `fimo` to scan the sequences in `infile` for the motifs in
# `motif_f`, writing results under <fimo_dir>/<nm>.
# NOTE(review): `fimo_dir` is a global not defined in this script --
# presumably set elsewhere before this function is called; confirm.
call_fimo <- function (infile, motif_f, nm) {
outdir = paste(fimo_dir, nm, sep="")
system(paste("fimo --parse-genomic-coord --oc ", outdir," ", motif_f, " ",infile, sep=""))
# mast [options] <motif file> <sequence file>
}
# Symlink every "*_narrowPeak.bed" file found under ../<indir> into the
# current working directory (with shortened names), then run HOMER
# `mergePeaks` on the collected BEDs to produce a merged peak table plus a
# venn overlap summary.
#
# indirs: directory names (one per comparison) to search for peak BEDs.
# dist:   value for the mergePeaks -d argument (e.g. "given").
# nm:     basename prefix for the merged output files.
# Called for its side effects (symlinks and mergePeaks output files).
mergeset <- function (indirs, dist, nm) {
lnfiles = NULL
for (jj in indirs) {
cat(jj,"\n")
infile = list.files(path=paste("../",jj,sep=""),pattern = "(.*)_narrowPeak.bed", full=T)
# Only link when exactly one matching BED exists; ambiguous/empty dirs skip.
if (length(infile) == 1) {
lnfile = gsub("diff__|_q20|_narrowPeak.bed", "", basename(infile), perl=T)
system(paste("ln -sf ", infile, " ",lnfile, sep=""))
lnfiles = c(lnfiles, lnfile)
}
}
lnfiles_str = paste(sort(lnfiles,decreasing=T), sep="", collapse=" ")
outf = paste(nm, "_",dist,".txt", sep="")
vennf = paste(nm, "_",dist,"_venn.txt", sep="")
# NOTE(review): `vennxls` is built but never used below.
vennxls = paste(nm, "_",dist,"_venn.xls", sep="")
system(paste("mergePeaks -d ", dist, " ", lnfiles_str ," -venn ",vennf, " > ", outf,sep=""))
}
mergeset_anno <- function (indirs, outdir,dist="given", nm, savef) {
peakfile = paste(nm, "_given.txt",sep="")
dm = read.delim(peakfile,stringsAsFactors=F)
colnames(dm)[1] = c("mergedPeakId")
write.table(dm,file=peakfile,row.names = F,col.names=F, quote = F,sep="\t",append = T)
dm = dm %>% mutate(coord=paste(chr,":",start,"-", end,sep=""))
peakannofile = gsub(".txt", "_annot.txt", peakfile, perl=T)
statfile = gsub(".txt", "_stat.txt", peakfile, perl=T)
GO_dir = paste("GO/",sep="")
GenomeOntology_dir = paste("GenomeOntology/",sep="")
system(paste("annotatePeaks.pl ", peakfile," dm3 -go ", GO_dir, " -genomeOntology ",GenomeOntology_dir, " -annStats ", statfile, " > ", peakannofile,sep=""))
load(file = paste(data_dir,"diffpeaks_anno_q20TRUE_spikeinFALSE_correctedMotif.RData",sep=""))
selected = intersect(names(peak_l_uni), indirs)
Dsuper1 = NULL
Dsuper2 = select(dm, mergedPeakId) %>% distinct()
for (bn in selected) {
bn2 = gsub("_q20|diff__","",bn,perl=T)
ddi = peak_l_uni[[bn]]
ddi = ddi %>% dplyr::select(peakid, nearestTSS,peakscore, insv_CCAATTGG,insv_CCAATAAG, CP190, GAGA, BEAF32,EBox, SuH, matches("dist2NearestTSS|TSS_up2k|five_prime_UTR|three_prime_UTR|CDS|intron")) %>% distinct()
ddi[is.na(ddi)] = ""
dm2_i = dm[, regexpr(paste("mergedPeakId|",bn2,sep="",collapse=""), colnames(dm),perl=T) !=-1]
colnames(dm2_i) = c("mergedPeakId", "peakid")
npeaks = sapply(str_extract_all(paste(dm2_i$peakid,",",sep=""), ","), length)
dm2_i = mutate(dm2_i, npeaks=npeaks)
dm2_i = data.frame(mergedPeakId=dm2_i[rep(1:nrow(dm2_i), dm2_i$npeaks),"mergedPeakId"], peakid = unlist(strsplit(paste(dm2_i$peakid,",",sep=""),split=",")),stringsAsFactors=F )
dm2_i = dm2_i %>% inner_join(ddi)
dm2_i1 = dm2_i %>% dplyr::select( mergedPeakId,nearestTSS, insv_CCAATTGG, insv_CCAATAAG, CP190,GAGA, BEAF32,EBox, SuH, matches("dist2NearestTSS|TSS_up2k|five_prime_UTR|three_prime_UTR|CDS|intron"))
dm2_i2 = dm2_i %>% dplyr::select(mergedPeakId,peakid, peakscore) %>% distinct()
dm2_i2 = dm2_i2 %>% group_by(mergedPeakId) %>% summarise(peakid=paste(unique(peakid), collapse=";",sep=""), maxPeakscore=max(peakscore,na.rm = TRUE))
colnames(dm2_i2 )[2:ncol(dm2_i2 )] = paste(bn2,".",colnames(dm2_i2 )[2:ncol(dm2_i2 )],sep="" )
if (is.null (Dsuper1) ) {
Dsuper1 = dm2_i1
} else {
Dsuper1 = bind_rows(Dsuper1, dm2_i1)
}
Dsuper2 = Dsuper2 %>% left_join(dm2_i2)
}
Dsuper1 = Dsuper1 %>% distinct() %>% group_by(mergedPeakId) %>% summarise(nearestTSS=first(nearestTSS),
insv_CCAATTGG=paste(unique(insv_CCAATTGG), collapse=";",sep=""), insv_CCAATAAG=paste(unique(insv_CCAATAAG), collapse=";",sep=""),
CP190=paste(unique(CP190), collapse=";",sep=""), GAGA=paste(unique(GAGA), collapse=";",sep=""),
BEAF32=paste(unique(BEAF32), collapse=";",sep=""), EBox=paste(unique(EBox), collapse=";",sep=""), SuH=paste(unique(SuH), collapse=";",sep=""),
TSS_up2k=paste(unique(TSS_up2k), collapse=";",sep=""),five_prime_UTR=paste(unique(five_prime_UTR), collapse=";",sep=""),
three_prime_UTR=paste(unique(three_prime_UTR), collapse=";",sep=""),CDS=paste(unique(CDS), collapse=";",sep=""),
intron=paste(unique(intron), collapse=";",sep=""))
Dsuper1 = Dsuper1 %>% mutate(insv_CCAATTGG = gsub("^;|;$","",insv_CCAATTGG,perl=T)) %>% mutate(insv_CCAATAAG=gsub("^;|;$","",insv_CCAATAAG,perl=T)) %>%
mutate(CP190=gsub("^;|;$","",CP190,perl=T)) %>% mutate(GAGA=gsub("^;|;$","",GAGA,perl=T)) %>%
mutate(BEAF32=gsub("^;|;$","",BEAF32,perl=T)) %>% mutate(EBox=gsub("^;|;$","",EBox,perl=T)) %>% mutate(SuH=gsub("^;|;$","",SuH,perl=T)) %>%
mutate(TSS_up2k=gsub("^;|;$","",TSS_up2k,perl=T), five_prime_UTR=gsub("^;|;$","",five_prime_UTR,perl=T),three_prime_UTR=gsub("^;|;$","",three_prime_UTR,perl=T)) %>%
mutate(CDS=gsub("^;|;$","",CDS,perl=T), intron=gsub("^;|;$","",intron,perl=T))
Dsuper = select(dm, mergedPeakId, coord, Total.subpeaks) %>% distinct() %>% inner_join(Dsuper1) %>% inner_join(Dsuper2) %>%
mutate(maxScore=apply(cbind(wtElba1__elba1Elba1.maxPeakscore, wtElba2__elba2Elba2.maxPeakscore, wtElba3__elba3Elba3.maxPeakscore, wtInsv__insvInsv.maxPeakscore), 1, max, na.rm = TRUE)) %>%
arrange(desc(maxScore))
dd = read.delim(peakannofile, stringsAsFactors=F, header=T)
colnames(dd)[1] = "PeakID"
dd$Focus.Ratio.Region.Size = gsub("_peaks.txt","", dd$Focus.Ratio.Region.Size)
dd = dd %>% filter(PeakID %in% Dsuper$mergedPeakId) %>%
mutate(Annotation=gsub(" UTR", "UTR", Annotation)) %>%
mutate(genomic_region=gsub("\\s+(.*)", "", Annotation)) %>%
arrange(desc(Peak.Score)) %>%
select(PeakID, Gene.Name, genomic_region, Distance.to.TSS)
Dsuper2 = Dsuper %>% select(mergedPeakId, coord, insv_CCAATTGG, insv_CCAATAAG, CP190, GAGA,BEAF32,EBox, SuH, matches("peakid"),maxScore)%>%
left_join(dd, by=c("mergedPeakId"="PeakID")) %>%
select(mergedPeakId, coord, genomic_region, Gene.Name, Distance.to.TSS, everything())
Dsuper2 = Dsuper2 %>% mutate(motifs = paste(insv_CCAATTGG,insv_CCAATAAG,CP190,GAGA,sep=";"))
Dsuper2 = Dsuper2 %>% mutate(motifs=gsub("\\;{2,}",";", motifs)) %>% mutate(motifs=gsub("^\\;{1,}","", motifs)) %>% mutate(motifs=gsub("\\;{1,}$","", motifs))
Dsuper2[Dsuper2$motifs %in% "", "motifs"] = "nomotifs"
Dsuper2 = Dsuper2 %>% mutate(motifsCombine = "")
Dsuper2[grepl("insv_CCAATTGG;insv_CCAATAAG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATTGG;insv_CCAATAAG"
Dsuper2[grepl("insv_CCAATTGG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATTGG"
Dsuper2[grepl("insv_CCAATAAG", Dsuper2$motifs) & (Dsuper2$motifsCombine %in% ""), "motifsCombine"] = "insv_CCAATAAG"
table(Dsuper2$motifsCombine)
dm = dm %>% select(matches("mergedPeakId|Elba|Insv"))
dm_peak = NULL
flist = list()
for (ii in 2:ncol(dm)) {
bn = colnames(dm)[ii]
dm_i = dm[, c("mergedPeakId", bn)]
colnames(dm_i) = c("mergedPeakId", "peak")
dm_i = dm_i %>% filter(peak != "")
flist[[bn]] = unique(dm_i$mergedPeakId)
dm_peak_i = data.frame(mergedPeakId=unique(dm_i$mergedPeakId), factor_motif=bn, stringsAsFactors=F)
dm_peak = rbind(dm_peak, dm_peak_i)
}
# save(flist,dm_peak,file=paste(RData_dir,"Elba_merge_all_mutant_peaks.RData",sep="") )
save(Dsuper,Dsuper2,dd,flist,dm_peak,file=savef)
}
############################################################################################################
################################################# merge diff peak ##########################################
setwd(peak_dir)
macs_dirs = list.dirs(path = ".", full.names = FALSE, recursive = FALSE)
######## 4 major binding sites: wt vs cognate mutant ########
indirs = macs_dirs[grepl("wtElba1_q20__elba1Elba1|wtElba2_q20__elba2Elba2|wtElba3_q20__elba3Elba3|wtInsv_q20__insvInsv",macs_dirs)]
outdir = paste("merge__major", sep="")
dir.create(outdir)
setwd(outdir)
mergeset(indirs, dist="given", nm="merge_major")
mergeset_anno(indirs, outdir,dist="given", nm="merge_major", savef = paste(data_dir, "chip_supertable_with_RNAseq_correctedMotif.RData",sep=""))
######## wt+mutant vs cognate mutant: 16 ########
setwd(peak_dir)
indirs = macs_dirs[grepl("Elba1_q20__elba1Elba1|Elba3_q20__elba3Elba3|Elba2_q20__elba2Elba2|Insv_q20__insvInsv",macs_dirs)]
outdir = paste("merge__major_ElbaAntibody", sep="")
dir.create(outdir)
setwd(outdir)
mergeset(indirs, dist="given", nm="mergemajor_ElbaAntibody")
mergeset_anno(indirs, outdir,dist="given", nm="mergemajor_ElbaAntibody", savef = paste(data_dir, "chip_ElbaAntibody_supertable_with_RNAseq_correctedMotif.RData",sep=""))
############################################################################################################
################################################# chipseq subsets ##########################################
load(file=paste(data_dir, "chip_ElbaAntibody_supertable_with_RNAseq_correctedMotif.RData",sep=""))
Dsuper2 = Dsuper2[!is.infinite(Dsuper2$maxScore), ]
chip_subsets = list()
########################### Wt major ###########################
chip_subsets[["wtElba1"]] = Dsuper2 %>% filter(!is.na(wtElba1__elba1Elba1.peakid))
chip_subsets[["wtElba2"]] = Dsuper2 %>% filter(!is.na(wtElba2__elba2Elba2.peakid))
chip_subsets[["wtElba3"]] = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid))
chip_subsets[["wtInsv"]] = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid))
########################### Elba3 #############################
elba3_elba12dep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(elba1Elba3__elba3Elba3.peakid) & is.na(elba2Elba3__elba3Elba3.peakid))
elba3_elba12indep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid) | !is.na(elba2Elba3__elba3Elba3.peakid)))
elba12_gained = Dsuper2 %>% filter(is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid) | !is.na(elba2Elba3__elba3Elba3.peakid)))
elba1_gained = Dsuper2 %>% filter(is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid)))
elba1_indep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & (!is.na(elba1Elba3__elba3Elba3.peakid)))
chip_subsets[["elba3_elba12indep"]] = elba3_elba12indep
chip_subsets[["elba3_elba12dep"]] = elba3_elba12dep
chip_subsets[["elba12_gained"]] =elba12_gained
chip_subsets[["elba1_gained"]] = elba1_gained
chip_subsets[["elba1_indep"]] = elba1_indep
########################### Elba1 #############################
elba1_wt = Dsuper2 %>% filter((!is.na(wtElba1__elba1Elba1.peakid)))
elba1_elba2mut = Dsuper2 %>% filter((!is.na(elba2Elba1__elba1Elba1.peakid)) )
elba3_elba2mut = Dsuper2 %>% filter((!is.na(elba2Elba3__elba3Elba3.peakid)))
chip_subsets[["elba1_wt"]] = elba1_wt
chip_subsets[["elba1_elba2mut"]] = elba1_elba2mut
chip_subsets[["elba3_elba2mut"]] = elba3_elba2mut
########################### Wt ################################
elba3_wt = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(wtInsv__insvInsv.peakid))
insv_wt = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & is.na(wtElba3__elba3Elba3.peakid))
elba3_insv_wt = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & !is.na(wtInsv__insvInsv.peakid))
chip_subsets[["elba3_wt_noinsv"]] = elba3_wt
chip_subsets[["insv_wt_noElba3"]] = insv_wt
chip_subsets[["elba3_insv_wt"]] = elba3_insv_wt
########################### Elba3-only, insv-only, elba1/2/3 overlap, 4 factor overlap ###########################
elba3_elba12dep = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(elba1Elba3__elba3Elba3.peakid) & is.na(elba2Elba3__elba3Elba3.peakid))
elba3_only = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtInsv__insvInsv.peakid) )
insv_only = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtElba3__elba3Elba3.peakid) )
insv_nonunique = Dsuper2 %>% filter(!is.na(wtInsv__insvInsv.peakid) & !mergedPeakId %in% insv_only$mergedPeakId)
elba123_ovlp = Dsuper2 %>% filter(!is.na(wtElba1__elba1Elba1.peakid) & !is.na(wtElba2__elba2Elba2.peakid) & !is.na(wtElba3__elba3Elba3.peakid) )
elba3.only_elba12dep_ovlp = elba3_elba12dep %>% filter(wtElba3__elba3Elba3.peakid %in% elba3_only$wtElba3__elba3Elba3.peakid)
elba123_ovlp.ovlp_insv = elba123_ovlp %>% filter(!is.na(wtInsv__insvInsv.peakid) )
elba123_ovlp.notovlp_insv = elba123_ovlp %>% filter( is.na(wtInsv__insvInsv.peakid) )
elba3insv_noelba12 = Dsuper2 %>% filter(!is.na(wtElba3__elba3Elba3.peakid) & !is.na(wtInsv__insvInsv.peakid) & is.na(wtElba1__elba1Elba1.peakid) & is.na(wtElba2__elba2Elba2.peakid) )
elba13_noelba2_noinsv = Dsuper2 %>% filter((!is.na(wtElba3__elba3Elba3.peakid) | is.na(wtElba1__elba1Elba1.peakid)) & is.na(wtElba2__elba2Elba2.peakid) & is.na(wtInsv__insvInsv.peakid) )
chip_subsets[["elba3_only"]] = elba3_only
chip_subsets[["insv_only"]] = insv_only
chip_subsets[["insv_nonunique"]] = insv_nonunique
chip_subsets[["elba123_ovlp"]] = elba123_ovlp
chip_subsets[["elba123_ovlp.ovlp_insv"]] = elba123_ovlp.ovlp_insv
chip_subsets[["elba123_ovlp.notovlp_insv"]] = elba123_ovlp.notovlp_insv
chip_subsets[["elba3.only_elba12dep_ovlp"]] = elba3.only_elba12dep_ovlp
chip_subsets[["elba3insv_noelba12"]] = elba3insv_noelba12
chip_subsets[["elba13_noelba2_noinsv"]] = elba13_noelba2_noinsv
save(chip_subsets, file =paste(data_dir,"chip_elba_supertable_subsets.RData",sep=""))
########################################################################################################################################
######################### chipseq subsets: top 200 peaks with motif, peak with/without motifs ##########################################
# Flatten a vector of semicolon-separated gene strings into one character
# vector of unique gene names, keeping first-appearance order.
get_genes <- function (genes) {
  unique(unlist(strsplit(genes, split = ";")))
}
get_myset <-function (peak_l_uni) {
# peak_l_uni = diffpeak_peak_l_uni
mysets = list()
bns = names(peak_l_uni)[regexpr("q20", names(peak_l_uni), perl=T)!=-1 ]
for (bn in bns) {
dpeak0 = peak_l_uni[[bn]]
colnames(dpeak0)[colnames(dpeak0) == "PeakID"] = "peakid"
bn2 = gsub("_q20","", bn, perl=T)
dpeak = dpeak0 %>% arrange(desc(peakscore)) %>% dplyr::select(peakid, nearestTSS,dist2NearestTSS, peakscore, starts_with("insv"),"GAGA", "CP190" )
dpeak_insv = filter(dpeak, (insv_CCAATTGG != "" | insv_CCAATAAG != ""))
dpeak_insv_CCAATTGG = filter(dpeak, (insv_CCAATTGG != "" & insv_CCAATAAG == ""))
dpeak_insv_CCAATAAG = filter(dpeak, (insv_CCAATAAG != "" & insv_CCAATTGG == "" ))
dpeak_insvNOT = filter(dpeak, (insv_CCAATTGG == "" & insv_CCAATAAG == ""))
dpeak_CP190 = filter(dpeak, (CP190 != "" & (insv_CCAATTGG == "" & insv_CCAATAAG == "")))
dpeak_GAGA = filter(dpeak, (GAGA != "" & (insv_CCAATTGG == "" & insv_CCAATAAG == "")))
dpeak_insv_GAGA = filter(dpeak, (GAGA != "" & (insv_CCAATTGG != "" | insv_CCAATAAG != "")))
dpeak_tss2k = dpeak[abs(dpeak$dist2NearestTSS) <= 2000, ]
dyy0 = dpeak0 %>% filter(peakid %in% dpeak_tss2k$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy0, nm = "insv")
dpeak_tss2k_insv = dpeak_insv %>% filter(peakid %in% dpeak_tss2k$peakid) %>% mutate(ntiles = ntile(peakscore,5))
dpeak_tss2k_insv_top200 = dpeak_tss2k_insv[1:200,]
dyy1 = dpeak0 %>% filter(peakid %in% dpeak_tss2k_insv_top200$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy1, nm = "insv_top200")
dpeak_tss2k_insv_top100 = dpeak_tss2k_insv[1:100,]
dpeak_tss2k_insvNOT = filter(dpeak_insvNOT, peakid %in% dpeak_tss2k$peakid)
dyy2 = dpeak0 %>% filter(peakid %in% dpeak_tss2k_insvNOT$peakid) %>% dplyr::select(peakid,peakscore, nearestTSS, coord)
# create_beds(bn2, dyy2, nm = "insvNOT")
dpeak_tss2k_insv_CCAATTGG = filter(dpeak_insv_CCAATTGG, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_insv_CCAATAAG = filter(dpeak_insv_CCAATAAG, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_CP190 = filter(dpeak_CP190, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_GAGA = filter(dpeak_GAGA, peakid %in% dpeak_tss2k$peakid)
dpeak_tss2k_insv_GAGA = filter(dpeak_insv_GAGA, peakid %in% dpeak_tss2k$peakid)
mysets[[paste(bn2, "__insv_tss2k", sep="")]] = get_genes(dpeak_tss2k_insv$nearestTSS)
mysets[[paste(bn2, "__insv_tss2k_top100", sep="")]] = get_genes(dpeak_tss2k_insv_top100$nearestTSS)
mysets[[paste(bn2, "__insv_tss2k_top200", sep="")]] = get_genes(dpeak_tss2k_insv_top200$nearestTSS)
mysets[[paste(bn2, "__insvNOT_tss2k", sep="")]] = get_genes(dpeak_tss2k_insvNOT$nearestTSS)
for (jj in 1:5) {
xx = dpeak_tss2k_insv %>% filter(ntiles == jj)
mysets[[paste(bn2, "__insv_tss2k_",jj,"tile", sep="")]] = get_genes(xx$nearestTSS)
}
}
print(lapply(mysets, length))
mysets
}
load( file = paste(data_dir,"diffpeaks_anno_q20TRUE_spikeinFALSE_correctedMotif.RData",sep=""))
diffpeak_peak_l_uni = peak_l_uni
ww = names(diffpeak_peak_l_uni)
ww = gsub("diff__|_q20", "", ww, perl=T)
ww = gsub("elba\\d+|insv|wt", "",ww, perl=T)
ww1 = gsub("(.*)__(.*)", "\\1",ww, perl=T)
ww2 = gsub("(.*)__(.*)", "\\2",ww, perl=T)
diffpeak_peak_l_uni = diffpeak_peak_l_uni[names(diffpeak_peak_l_uni)[which(ww1 == ww2)]]
diffpeak_peak_l_uni = diffpeak_peak_l_uni[names(diffpeak_peak_l_uni) %in% c("diff__wtElba1_q20__elba1Elba1_q20","diff__wtElba2_q20__elba2Elba2_q20","diff__wtElba3_q20__elba3Elba3_q20","diff__wtInsv_q20__insvInsv_q20")]
diffpeak_sets = get_myset(diffpeak_peak_l_uni)
names(diffpeak_sets) = gsub("diff__", "", names(diffpeak_sets), perl=T)
save(diffpeak_sets, file = paste(file=paste(data_dir, "genesets2.RData",sep="")))
|
# Addressing reviewer comments
# Sept 2020
library(data.table)
library(dplyr)
library(Matrix)
library(DropletUtils)
library(scran)
library(biomaRt)
library(scater)
library(vegan)
library(PCAtools)
library(cowplot)
library(ggplot2)
library(ggtext)
library(grid)
library(gridExtra)
# get data from Izar et al. 2020 - download as GSE
# make metadata and expression objects
ov_sce_meta <- t(head(fread('~/Documents/R/onecarbon/data/GSE146026_Izar_HGSOC_ascites_10x_log.tsv'), 7))
colnames(ov_sce_meta) <- ov_sce_meta[1, ]
ov_sce_meta <- data.frame(ov_sce_meta[-1, ])
ov_sce <- fread('~/Documents/R/onecarbon/data/GSE146026_Izar_HGSOC_ascites_10x_log.tsv', skip=8)
ov_sce_expr <- Matrix(data.matrix(ov_sce)[, -1])
rownames(ov_sce_expr) <- ov_sce$V1
colnames(ov_sce_expr) <- ov_sce_meta$X10x_barcode
# extract relevant markers, as well as tSNE embeddings
dset <- t(as.matrix(ov_sce_expr[c('GZMB', 'AOX1','COL1A1', 'PTPRC', 'GNLY', 'KLRB1', 'NNMT', 'EPCAM', 'CD8A', 'CD4', 'NKG7', 'CD3D'), ]))
dset <- data.table(X1=ov_sce_meta$TSNE_x, X2=ov_sce_meta$TSNE_y, dset)
dset <- dset[ , lapply(.SD, as.numeric)]
# make supp figure
# ---- Supplementary figure: marker-gene expression on the t-SNE embedding ----
theme_set(theme_cowplot())
# All six panels share the same layout, so build them with a single helper
# instead of six copy-pasted ggplot blocks (deduplication; output identical).
# `gene` must be a column name of `dset`; the legend title is the italicised
# gene symbol, rendered by ggtext's element_markdown().
plot_marker <- function(gene) {
  ggplot(dset, aes(x = X1, y = X2, color = .data[[gene]])) +
    geom_point() +
    scale_color_gradient(paste0("*", gene, "*"),
                         low = rgb(0.7, 0.7, 0.7, 0.1),
                         high = rgb(0, 0, 0.7, 0.8)) +
    xlab('') + ylab('') +
    theme(legend.title = element_markdown())
}
g1 <- plot_marker("CD3D")
g2 <- plot_marker("EPCAM")
g3 <- plot_marker("COL1A1")
g4 <- plot_marker("PTPRC")
g5 <- plot_marker("NNMT")
g6 <- plot_marker("AOX1")
# NNMT leads, matching the original panel order.
p_all <- plot_grid(g5, g1, g2, g3, g4, g6)
# Shared axis labels for the composite figure.
y_grob <- textGrob('t-SNE2',
                   gp=gpar(col="black", fontsize=15), rot=90)
x_grob <- textGrob('t-SNE1',
                   gp=gpar(col="black", fontsize=15))
grid.arrange(arrangeGrob(p_all,
                         left = y_grob,
                         bottom = x_grob))
dev.copy2pdf(file='~/Documents/R/onecarbon/figures/Supp_Izar_et_al_NNMT_expression.pdf', width=10, height=5)
|
/scripts/06_revisions.R
|
no_license
|
vicDRC/BCCJJL01_ovarian
|
R
| false
| false
| 3,200
|
r
|
# Addressing reviewer comments
# Sept 2020
library(data.table)
library(dplyr)
library(Matrix)
library(DropletUtils)
library(scran)
library(biomaRt)
library(scater)
library(vegan)
library(PCAtools)
library(cowplot)
library(ggplot2)
library(ggtext)
library(grid)
library(gridExtra)
# get data from Izar et al. 2020 - download as GSE
# make metadata and expression objects
ov_sce_meta <- t(head(fread('~/Documents/R/onecarbon/data/GSE146026_Izar_HGSOC_ascites_10x_log.tsv'), 7))
colnames(ov_sce_meta) <- ov_sce_meta[1, ]
ov_sce_meta <- data.frame(ov_sce_meta[-1, ])
ov_sce <- fread('~/Documents/R/onecarbon/data/GSE146026_Izar_HGSOC_ascites_10x_log.tsv', skip=8)
ov_sce_expr <- Matrix(data.matrix(ov_sce)[, -1])
rownames(ov_sce_expr) <- ov_sce$V1
colnames(ov_sce_expr) <- ov_sce_meta$X10x_barcode
# extract relevant markers, as well as tSNE embeddings
dset <- t(as.matrix(ov_sce_expr[c('GZMB', 'AOX1','COL1A1', 'PTPRC', 'GNLY', 'KLRB1', 'NNMT', 'EPCAM', 'CD8A', 'CD4', 'NKG7', 'CD3D'), ]))
dset <- data.table(X1=ov_sce_meta$TSNE_x, X2=ov_sce_meta$TSNE_y, dset)
dset <- dset[ , lapply(.SD, as.numeric)]
# make supp figure
theme_set(theme_cowplot())
g1 <- ggplot(dset, aes(x=X1, y=X2, color=CD3D)) + geom_point() +
scale_color_gradient("*CD3D*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
xlab('') + ylab('') +
theme(legend.title = element_markdown())
g2 <- ggplot(dset, aes(x=X1, y=X2, color=EPCAM)) + geom_point() +
scale_color_gradient("*EPCAM*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
xlab('') + ylab('') +
theme(legend.title = element_markdown())
g3 <- ggplot(dset, aes(x=X1, y=X2, color=COL1A1)) + geom_point() +
scale_color_gradient("*COL1A1*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
xlab('') + ylab('') +
theme(legend.title = element_markdown())
g4 <- ggplot(dset, aes(x=X1, y=X2, color=PTPRC)) + geom_point() +
scale_color_gradient("*PTPRC*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
theme(legend.title = element_markdown()) +
xlab('') + ylab('')
g5 <- ggplot(dset, aes(x=X1, y=X2, color=NNMT)) + geom_point() +
scale_color_gradient("*NNMT*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
theme(legend.title = element_markdown()) +
xlab('') + ylab('')
g6 <- ggplot(dset, aes(x=X1, y=X2, color=AOX1)) + geom_point() +
scale_color_gradient("*AOX1*",
low=rgb(0.7,0.7,0.7,0.1),
high=rgb(0,0,0.7,0.8)) +
theme(legend.title = element_markdown()) +
xlab('') + ylab('')
p_all <- plot_grid(g5, g1, g2, g3, g4, g6)
y_grob <- textGrob('t-SNE2',
gp=gpar(col="black", fontsize=15), rot=90)
x_grob <- textGrob('t-SNE1',
gp=gpar(col="black", fontsize=15))
grid.arrange(arrangeGrob(p_all,
left = y_grob,
bottom = x_grob))
dev.copy2pdf(file='~/Documents/R/onecarbon/figures/Supp_Izar_et_al_NNMT_expression.pdf', width=10, height=5)
|
# Robert Dinterman
# Export zip/year-level USDA evaluation data to Stata (.dta) and CSV,
# collapsing to one row per group with derived income, loan-indicator, and
# rural-urban classification variables.
print(paste0("Started 1-USDA_Evaluation_Stata_Export at ", Sys.time()))
library(dplyr)
# Create a directory for the data
localDir <- "1-Organization/USDA_Evaluation"
if (!file.exists(localDir)) dir.create(localDir)
# Loads the object `data` prepared by an earlier pipeline step.
load("1-Organization/USDA_Evaluation/Final.Rda")
# Per-household real income/wages, then the mean of every remaining column
# within each zip/year group.
# NOTE(review): `summarise_each()`/`funs()` are deprecated in modern dplyr;
# the current equivalent is `summarise(across(everything(), mean))` -- left
# unchanged to stay compatible with the dplyr version this script targets.
data %>%
group_by(zip, year, STATE, fips, ruc03, metro03, long, lat, CPI) %>%
mutate(HHINC_IRS_R = AGI_IRS_R*1000 / HH_IRS,
HHWAGE_IRS_R = Wages_IRS_R*1000 / HH_IRS) %>%
select(Prov_num, emp:emp_, Pop_IRS, HHINC_IRS_R, HHWAGE_IRS_R,
ap_R, qp1_R, POV_ALL_P, roughness, slope, tri, AREA_cty, AREA_zcta,
loans, ploans, biploans1234) %>%
summarise_each(funs(mean)) -> data
# Log income, flooring values below 1 at 0 to avoid negative/undefined logs.
data$logINC <- ifelse(data$HHINC_IRS_R < 1, 0, log(data$HHINC_IRS_R))
# Ordinal categories of Prov_num (labels suggest service-quality tiers).
data$Prov_ord <- cut(data$Prov_num, breaks = c(0, 2, 3, 5.5, 7.5, 10, Inf),
labels = c("None", "Suppressed", "Moderate", "Good",
"High", "Excellent"), right = F)
# 0/1 indicators: positive loans / pilot loans / BIP rounds 1-4 loans.
data$iloans <- (data$loans > 0)*1
data$ipilot <- (data$ploans > 0)*1
data$ibip <- (data$biploans1234 > 0)*1
# Log transforms (+1 where zeros are possible).
data$logest <- log(data$est)
data$logemp_ <- log(data$emp_ + 1)
data$logPop_IRS <- log(data$Pop_IRS)
data$logHHWage <- log(data$HHWAGE_IRS_R)
data$logap_R <- log(data$ap_R + 1)
data$logemp <- log(data$emp + 1)
data$logqp1_R <- log(data$logqp1_R <- log(data$qp1_R + 1))
data$ruc <- factor(data$ruc03)
levels(data$ruc) <- list("metro" = 1:3, "adj" = c(4,6,8),
"nonadj" = c(5,7,9))
# export data frame to Stata binary format
library(foreign)
library(readr)
write.dta(data, paste0(localDir, "/Stata_USDA_Eval.dta"))
write_csv(data, paste0(localDir, "/Stata_USDA_Eval.csv"))
# NOTE(review): clearing the global environment from a script is an
# anti-pattern; harmless standalone but surprising when source()d.
rm(list = ls())
print(paste0("Finished 1-USDA_Evaluation_Stata_Export at ", Sys.time()))
|
/1-Organization/1-USDA_Evaluation_Stata_Export.R
|
no_license
|
yalunsu/test-counties
|
R
| false
| false
| 1,794
|
r
|
# Robert Dinterman
print(paste0("Started 1-USDA_Evaluation_Stata_Export at ", Sys.time()))
library(dplyr)
# Create a directory for the data
localDir <- "1-Organization/USDA_Evaluation"
if (!file.exists(localDir)) dir.create(localDir)
load("1-Organization/USDA_Evaluation/Final.Rda")
data %>%
group_by(zip, year, STATE, fips, ruc03, metro03, long, lat, CPI) %>%
mutate(HHINC_IRS_R = AGI_IRS_R*1000 / HH_IRS,
HHWAGE_IRS_R = Wages_IRS_R*1000 / HH_IRS) %>%
select(Prov_num, emp:emp_, Pop_IRS, HHINC_IRS_R, HHWAGE_IRS_R,
ap_R, qp1_R, POV_ALL_P, roughness, slope, tri, AREA_cty, AREA_zcta,
loans, ploans, biploans1234) %>%
summarise_each(funs(mean)) -> data
data$logINC <- ifelse(data$HHINC_IRS_R < 1, 0, log(data$HHINC_IRS_R))
data$Prov_ord <- cut(data$Prov_num, breaks = c(0, 2, 3, 5.5, 7.5, 10, Inf),
labels = c("None", "Suppressed", "Moderate", "Good",
"High", "Excellent"), right = F)
data$iloans <- (data$loans > 0)*1
data$ipilot <- (data$ploans > 0)*1
data$ibip <- (data$biploans1234 > 0)*1
data$logest <- log(data$est)
data$logemp_ <- log(data$emp_ + 1)
data$logPop_IRS <- log(data$Pop_IRS)
data$logHHWage <- log(data$HHWAGE_IRS_R)
data$logap_R <- log(data$ap_R + 1)
data$logemp <- log(data$emp + 1)
data$logqp1_R <- log(data$qp1_R + 1)
data$ruc <- factor(data$ruc03)
levels(data$ruc) <- list("metro" = 1:3, "adj" = c(4,6,8),
"nonadj" = c(5,7,9))
# export data frame to Stata binary format
library(foreign)
library(readr)
write.dta(data, paste0(localDir, "/Stata_USDA_Eval.dta"))
write_csv(data, paste0(localDir, "/Stata_USDA_Eval.csv"))
rm(list = ls())
print(paste0("Finished 1-USDA_Evaluation_Stata_Export at ", Sys.time()))
|
\name{sumry_continous}
\alias{sumry_continous}
\title{Summary of continuous variable
}
\description{
Function gives detailed Summary of continuous variable
}
\usage{
sumry_continous(cont_var_test)
}
\arguments{
\item{cont_var_test}{
cont_var_test: a dataset that contains only continuous (numeric) variables
}
}
\details{
This function performs a thorough quality check on continuous (numeric) data, reporting summary statistics such as the minimum, maximum, and mean, and also gives the percentile values
}
\value{
It returns Data Frame
}
\references{
www.surgicalcriticalcare.net/Statistics/continuous.pdf
}
\author{
Rahul Mehta
}
\seealso{
sumry_cat for categorical variable
}
\examples{
data(iris)
#first identify continuous variable from the dataset using following function
cont_data=ident_cont(iris)
summry_cont=sumry_continous(cont_data)
}
|
/man/sumry_continous.Rd
|
no_license
|
cran/RDIDQ
|
R
| false
| false
| 848
|
rd
|
\name{sumry_continous}
\alias{sumry_continous}
\title{Summary of continuous variable
}
\description{
Function gives detailed Summary of continuous variable
}
\usage{
sumry_continous(cont_var_test)
}
\arguments{
\item{cont_var_test}{
cont_var_test: a dataset that contains only continuous (numeric) variables
}
}
\details{
This function performs a thorough quality check on continuous (numeric) data, reporting summary statistics such as the minimum, maximum, and mean, and also gives the percentile values
}
\value{
It returns Data Frame
}
\references{
www.surgicalcriticalcare.net/Statistics/continuous.pdf
}
\author{
Rahul Mehta
}
\seealso{
sumry_cat for categorical variable
}
\examples{
data(iris)
#first identify continuous variable from the dataset using following function
cont_data=ident_cont(iris)
summry_cont=sumry_continous(cont_data)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/createModels.R
\name{replaceBodyTags}
\alias{replaceBodyTags}
\title{Replace Body Tags}
\usage{
replaceBodyTags(bodySection, bodyTags, initCollection)
}
\arguments{
\item{bodySection}{}
\item{bodyTags}{}
\item{initCollection}{The list of all arguments parsed from the init section}
}
\value{
Returns updated bodySection
}
\description{
To do: fill in some details
}
\keyword{internal}
|
/man/replaceBodyTags.Rd
|
no_license
|
clbustos/MplusAutomation
|
R
| false
| false
| 497
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/createModels.R
\name{replaceBodyTags}
\alias{replaceBodyTags}
\title{Replace Body Tags}
\usage{
replaceBodyTags(bodySection, bodyTags, initCollection)
}
\arguments{
\item{bodySection}{}
\item{bodyTags}{}
\item{initCollection}{The list of all arguments parsed from the init section}
}
\value{
Returns updated bodySection
}
\description{
To do: fill in some details
}
\keyword{internal}
|
with(a33ec5d63787c4760a0876ec8277ff991, {ROOT <- 'C:/semoss/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/f6ba5938-ef1b-4430-b7b0-261d1cc8174d';rm(list=ls())});
|
/f6ba5938-ef1b-4430-b7b0-261d1cc8174d/R/Temp/aK86ek3rf8wqw.R
|
no_license
|
ayanmanna8/test
|
R
| false
| false
| 183
|
r
|
with(a33ec5d63787c4760a0876ec8277ff991, {ROOT <- 'C:/semoss/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/f6ba5938-ef1b-4430-b7b0-261d1cc8174d';rm(list=ls())});
|
# TODO: Add comment
#
# Author: Giorgio Spedicato
###############################################################################
# need to set up Rcpp calls
#CLASSES DEFINITIONS
# S4 class "lifetable": integer ages x paired with the number of survivors
# lx at each age, plus a human-readable name used when printing.
setClass("lifetable", # lifetable class
representation(x="numeric",lx="numeric",name="character"),
prototype(x=c(0,1,2,3),
lx=c(100,90,50,10),
name="Generic life table"
)
)
#actuarial classes
# "actuarialtable" extends lifetable with a flat annual interest rate
# (default 3%) used for discounting in the commutation columns.
setClass("actuarialtable",
representation=representation(interest="numeric"),
contains="lifetable",
prototype(x=c(0,1,2,3),
lx=c(100,90,50,10),
name="Generic actuarial table",
interest=0.03
)
)
#METHODS DEFINITIONS
#constructor for lifetable object
# lifetable <- function(x = 0:3, lx = c(100,90, 50, 10), name = "Generic life table") {
# if(length(x) != length(lx)) stop("length of x and lx must be equal")
#
# posToRemove <- which(lx %in% c(0,NA))
# if(length(posToRemove) > 0) {
# x <- x[-posToRemove]
# lx <- lx[-posToRemove]
# }
#
# # order by increasing value of x
# o <- order(x)
# x <- x[o]
# lx <- lx[o]
#
# out <- new("lifetable", x = x, lx = lx, name = name)
# return(out)
# }
# Initializer for "lifetable": requires x and lx of equal length, drops ages
# whose lx is 0 or NA, sorts by increasing age, then runs validObject().
setMethod(f="initialize",
signature="lifetable",
definition=function(.Object, x = 0:3, lx=c(100,90, 50, 10), name="Generic life table") {
if(length(x) != length(lx)) stop("length of x and lx must be equal")
# drop ages with no survivors or missing lx
posToRemove <- which(lx %in% c(0,NA))
if(length(posToRemove) > 0) {
x <- x[-posToRemove]
lx <- lx[-posToRemove]
}
# order by increasing value of x
o <- order(x)
x <- x[o]
lx <- lx[o]
.Object@x <- x
.Object@lx <- lx
.Object@name <- name
validObject(.Object)
return(.Object)
}
)
# Initializer for "actuarialtable": same cleanup as the lifetable initializer
# (drop 0/NA lx, sort by age) plus storage of the interest rate.
# NOTE(review): the default name here is "Generic life table" while the class
# prototype uses "Generic actuarial table" -- confirm whether intentional.
setMethod(f="initialize",
signature="actuarialtable",
definition=function(.Object, x = 0:3, lx=c(100,90, 50, 10), name="Generic life table", interest=0.03) {
if(length(x) != length(lx)) stop("length of x and lx must be equal")
# drop ages with no survivors or missing lx
posToRemove <- which(lx %in% c(0,NA))
if(length(posToRemove) > 0) {
x <- x[-posToRemove]
lx <- lx[-posToRemove]
}
# order by increasing value of x
o <- order(x)
x <- x[o]
lx <- lx[o]
.Object@x <- x
.Object@lx <- lx
.Object@name <- name
.Object@interest <- interest
validObject(.Object)
return(.Object)
}
)
#validity method for lifetable object
# Validity checks for "lifetable": equal-length slots, non-increasing lx,
# and ages that are non-negative consecutive integers. Returns TRUE when
# valid, otherwise a character vector of problem messages.
setValidity("lifetable",
function(object) {
check <- character(0)
if(length(object@x)!=length(object@lx))
check <- c(check, "x and lx do not match in length")
if(any(diff(object@lx)>0))
check <- c(check, "lx must be non-increasing")
# integrality test via the fractional part
if(any(abs(object@x - floor(object@x)) > 0))
check <- c(check, "x must be integral")
if(any(object@x < 0))
check <- c(check, "x must be non-negative")
if(any(diff(object@x) != 1))
check <- c(check, "x must be consecutive integers")
if(length(check) == 0)
return(TRUE)
else
return(check)
}
)
#function to create lifetable cols
.createLifeTableCols<-function(object)
{
	# Derived display columns for a lifetable: for each age x, the survivors
	# lx, the one-year survival probability px = l(x+1)/l(x), and the curtate
	# life expectancy ex (computed by the package generic exn()).
	# Cleanup: removed the unused omega/Tx/Lx computations (dead code; the Lx
	# loop was already commented out and Tx was filled but never read).
	lenlx <- length(object@lx)
	# l(x+1), padded with 0 beyond the last tabulated age
	lxplus <- c(object@lx[-1], 0)
	exni <- numeric(lenlx)
	for (i in seq_len(lenlx)) exni[i] <- exn(object = object, x = i - 1, type = "curtate")
	out <- data.frame(x = object@x, lx = object@lx, px = lxplus / object@lx,
			ex = exni)
	# the last row is dropped: px and ex are not meaningful at the terminal age
	out <- out[1:(nrow(out) - 1), ]
	rownames(out) <- NULL
	return(out)
}
#show method 4 lifetable: prints x, lx, px, ex
# show() method for lifetable: header line with the table name, then the
# derived columns (x, lx, px, ex) from .createLifeTableCols().
setMethod("show", "lifetable",
  function(object) {
    cat(paste("Life table", object@name), "\n")
    cat("\n")
    print(.createLifeTableCols(object))
    cat("\n")
  }
)
#show method 4 lifetable: prints x, lx, px, ex
# print() method for lifetable: identical output to show() -- a header line
# with the table name followed by the derived columns.
setMethod("print", "lifetable",
  function(x) {
    cat(paste("Life table", x@name), "\n")
    cat("\n")
    print(.createLifeTableCols(x))
    cat("\n")
  }
)
#head and tail methods
# head() on a lifetable: the first rows of the raw (x, lx) columns.
# Note: extra arguments in ... are accepted but not forwarded, matching
# the original behavior.
setMethod("head",
  signature(x = "lifetable"),
  function(x, ...) {
    head(data.frame(x = x@x, lx = x@lx))
  }
)
#summary
# summary() for lifetable: prints the name, the terminal (omega) age and
# the curtate life expectancy at birth (generics getOmega/exn from package).
setMethod("summary",
signature(object="lifetable"),
function (object, ...)
{
cat("This is lifetable: ",object@name, "\n","Omega age is: ",getOmega(object), "\n", "Expected curtated lifetime at birth is: ",exn(object))
}
)
# summary() for actuarialtable: same as above plus the interest rate.
# NOTE(review): it also labels the object "This is lifetable:" -- confirm
# whether the wording should say "actuarialtable".
setMethod("summary",
signature(object="actuarialtable"),
function (object, ...)
{
cat("This is lifetable: ",object@name, "\n","Omega age is: ",getOmega(object), "\n",
"Expected curtated lifetime at birth is: ",exn(object),
"Interest rate used is:",object@interest)
}
)
#tail
# tail() on a lifetable: the last rows of the raw (x, lx) columns.
# Note: extra arguments in ... are accepted but not forwarded, matching
# the original behavior.
setMethod("tail",
  signature(x = "lifetable"),
  function(x, ...) {
    tail(data.frame(x = x@x, lx = x@lx))
  }
)
#internal function to create the actuarial table object
.createActuarialTableCols<-function(object)
{
	# Commutation columns for an actuarialtable at flat interest rate i:
	#   Dx = lx * (1+i)^-x        (discounted survivors)
	#   Cx = dx * (1+i)^-(x+1)    (discounted deaths, dx = lx - l(x+1))
	#   Nx = sum of Dx from age x onward
	#   Mx = Dx - (i/(1+i)) * Nx
	#   Rx = sum of Mx from age x onward
	# Cleanup: removed the unused `omega` variable (dead code) and switched
	# the 1:length(...) loop indices to seq_len().
	# l(x+1), padded with 0 beyond the last tabulated age
	lxplus <- c(object@lx[-1], 0)
	Dx <- object@lx * (1 + object@interest)^(-object@x)
	lnDx <- length(Dx)
	dx <- object@lx - lxplus
	Cx <- dx * (1 + object@interest)^(-object@x - 1)
	# backward cumulative sums of Dx
	Nx <- numeric(lnDx)
	for (i in seq_len(lnDx)) Nx[i] <- sum(Dx[i:lnDx])
	Mx <- Dx - (object@interest / (1 + object@interest)) * Nx
	# backward cumulative sums of Mx
	Rx <- numeric(lnDx)
	for (i in seq_len(lnDx)) Rx[i] <- sum(Mx[i:lnDx])
	out <- data.frame(x = object@x, lx = object@lx, Dx = Dx, Nx = Nx, Cx = Cx,
			Mx = Mx, Rx = Rx)
	rownames(out) <- NULL
	return(out)
}
# show() method for actuarialtable: header with the table name and interest
# rate, followed by the commutation columns from .createActuarialTableCols().
setMethod("show", "actuarialtable",
  function(object) {
    header <- paste("Actuarial table ", object@name, "interest rate ",
                    object@interest * 100, "%")
    cat(header, "\n")
    cat("\n")
    print(.createActuarialTableCols(object = object))
    cat("\n")
  }
)
#print method: show clone
# print() method for actuarialtable: identical output to show().
setMethod("print", "actuarialtable",
  function(x) {
    cat(paste("Actuarial table ", x@name, "interest rate ",
              x@interest * 100, "%"), "\n")
    cat("\n")
    print(.createActuarialTableCols(object = x))
    cat("\n")
  }
)
# plot() method for lifetable: scatter of survivors lx against age x;
# further graphical parameters are forwarded via ... .
setMethod("plot","lifetable",
function(x,y,...){
plot(x=x@x, y=x@lx, xlab="x values",
ylab="population at risk",
main=paste("life table",x@name),...)
}
)
#saves lifeTableObj as data frame
# lifetable -> data.frame: the derived columns from .createLifeTableCols().
setAs("lifetable","data.frame",
function(from){
out<-.createLifeTableCols(object=from)
return(out)
}
)
#get a data.frame containing x and lx and returns a new lifetable object
setAs(from="data.frame",to="lifetable",
def=function(from){
if(any(is.na(match(c("x","lx"), names(from))))) stop("Error! Both x and lx columns required!")
# keep only rows without NA before constructing the table
from<-from[complete.cases(from),]
out<-new("lifetable",x=from$x, lx=from$lx, name="COERCED")
return(out)
}
)
#saves actuarialtable as data frame (have same slots as life - table)
setAs("actuarialtable","data.frame",
function(from){
out<-.createActuarialTableCols(object=from)
return(out)
}
)
#coerce methods to numeric
# lifetable -> numeric: one-year death probabilities qxt(x, t = 1) for
# ages 0..omega (generics qxt/getOmega come from this package).
setAs("lifetable","numeric",
function(from) {
out<-numeric(getOmega(from)+1)
for(i in 0:getOmega(from)) out[i+1]<-qxt(object=from,x=i,t=1)
return(out)
}
)
# actuarialtable -> numeric: whole-life insurance values Axn by age.
# NOTE(review): out has length getOmega(from) but the loop fills only
# indices 1..getOmega(from)-1, leaving the last element 0 -- confirm
# whether the trailing zero is intentional.
setAs("actuarialtable","numeric",
function(from) {
out<-numeric(getOmega(from))
for(i in 0:(getOmega(from)-2)) out[i+1]<-Axn(actuarialtable=from,x=i)
return(out)
}
)
#demographic classes and methods
#setGeneric("pxt", function(object) standardGeneric("pxt"))
#setGeneric("qxt", function(object) standardGeneric("qxt"))
|
/R/0_lifetableAndActuarialtableClassesAndMethods.R
|
no_license
|
biblioactuary/lifecontingencies
|
R
| false
| false
| 8,097
|
r
|
# TODO: Add comment
#
# Author: Giorgio Spedicato
###############################################################################
# need to set up Rcpp calls
#CLASSES DEFINITIONS
setClass("lifetable", #classe lifetable
representation(x="numeric",lx="numeric",name="character"),
prototype(x=c(0,1,2,3),
lx=c(100,90,50,10),
name="Generic life table"
)
)
#actuarial classes
setClass("actuarialtable",
representation=representation(interest="numeric"),
contains="lifetable",
prototype(x=c(0,1,2,3),
lx=c(100,90,50,10),
name="Generic actuarial table",
interest=0.03
)
)
#METHODS DEFINITIONS
#constructor for lifetable object
# lifetable <- function(x = 0:3, lx = c(100,90, 50, 10), name = "Generic life table") {
# if(length(x) != length(lx)) stop("length of x and lx must be equal")
#
# posToRemove <- which(lx %in% c(0,NA))
# if(length(posToRemove) > 0) {
# x <- x[-posToRemove]
# lx <- lx[-posToRemove]
# }
#
# # order by increasing value of x
# o <- order(x)
# x <- x[o]
# lx <- lx[o]
#
# out <- new("lifetable", x = x, lx = lx, name = name)
# return(out)
# }
setMethod(f="initialize",
signature="lifetable",
definition=function(.Object, x = 0:3, lx=c(100,90, 50, 10), name="Generic life table") {
if(length(x) != length(lx)) stop("length of x and lx must be equal")
posToRemove <- which(lx %in% c(0,NA))
if(length(posToRemove) > 0) {
x <- x[-posToRemove]
lx <- lx[-posToRemove]
}
# order by increasing value of x
o <- order(x)
x <- x[o]
lx <- lx[o]
.Object@x <- x
.Object@lx <- lx
.Object@name <- name
validObject(.Object)
return(.Object)
}
)
setMethod(f="initialize",
signature="actuarialtable",
definition=function(.Object, x = 0:3, lx=c(100,90, 50, 10), name="Generic life table", interest=0.03) {
if(length(x) != length(lx)) stop("length of x and lx must be equal")
posToRemove <- which(lx %in% c(0,NA))
if(length(posToRemove) > 0) {
x <- x[-posToRemove]
lx <- lx[-posToRemove]
}
# order by increasing value of x
o <- order(x)
x <- x[o]
lx <- lx[o]
.Object@x <- x
.Object@lx <- lx
.Object@name <- name
.Object@interest <- interest
validObject(.Object)
return(.Object)
}
)
#validity method for lifetable object
setValidity("lifetable",
function(object) {
check <- character(0)
if(length(object@x)!=length(object@lx))
check <- c(check, "x and lx do not match in length")
if(any(diff(object@lx)>0))
check <- c(check, "lx must be non-increasing")
if(any(abs(object@x - floor(object@x)) > 0))
check <- c(check, "x must be integral")
if(any(object@x < 0))
check <- c(check, "x must be non-negative")
if(any(diff(object@x) != 1))
check <- c(check, "x must be consecutive integers")
if(length(check) == 0)
return(TRUE)
else
return(check)
}
)
#function to create lifetable cols
.createLifeTableCols<-function(object)
{
	# Derived display columns for a lifetable: for each age x, the survivors
	# lx, the one-year survival probability px = l(x+1)/l(x), and the curtate
	# life expectancy ex (computed by the package generic exn()).
	# Cleanup: removed the unused omega/Tx/Lx computations (dead code; the Lx
	# loop was already commented out and Tx was filled but never read).
	lenlx <- length(object@lx)
	# l(x+1), padded with 0 beyond the last tabulated age
	lxplus <- c(object@lx[-1], 0)
	exni <- numeric(lenlx)
	for (i in seq_len(lenlx)) exni[i] <- exn(object = object, x = i - 1, type = "curtate")
	out <- data.frame(x = object@x, lx = object@lx, px = lxplus / object@lx,
			ex = exni)
	# the last row is dropped: px and ex are not meaningful at the terminal age
	out <- out[1:(nrow(out) - 1), ]
	rownames(out) <- NULL
	return(out)
}
#show method 4 lifetable: prints x, lx, px, ex
setMethod("show","lifetable", #metodo show
function(object){
cat(paste("Life table",object@name),"\n")
cat("\n")
out<-.createLifeTableCols(object)
print(out)
cat("\n")
}
)
#show method 4 lifetable: prints x, lx, px, ex
setMethod("print","lifetable", #metodo show
function(x){
cat(paste("Life table",x@name),"\n")
cat("\n")
out<-.createLifeTableCols(x)
print(out)
cat("\n")
}
)
#head and tail methods
setMethod("head",
signature(x = "lifetable"),
function (x, ...)
{
temp<-data.frame(x=x@x, lx=x@lx)
head(temp)
}
)
#summary
setMethod("summary",
signature(object="lifetable"),
function (object, ...)
{
cat("This is lifetable: ",object@name, "\n","Omega age is: ",getOmega(object), "\n", "Expected curtated lifetime at birth is: ",exn(object))
}
)
setMethod("summary",
signature(object="actuarialtable"),
function (object, ...)
{
cat("This is lifetable: ",object@name, "\n","Omega age is: ",getOmega(object), "\n",
"Expected curtated lifetime at birth is: ",exn(object),
"Interest rate used is:",object@interest)
}
)
#tail
setMethod("tail",
signature(x = "lifetable"),
function (x, ...)
{
temp<-data.frame(x=x@x, lx=x@lx)
tail(temp)
}
)
#internal function to create the actuarial table object
.createActuarialTableCols<-function(object)
{
	# Commutation columns for an actuarialtable at flat interest rate i:
	#   Dx = lx * (1+i)^-x        (discounted survivors)
	#   Cx = dx * (1+i)^-(x+1)    (discounted deaths, dx = lx - l(x+1))
	#   Nx = sum of Dx from age x onward
	#   Mx = Dx - (i/(1+i)) * Nx
	#   Rx = sum of Mx from age x onward
	# Cleanup: removed the unused `omega` variable (dead code) and switched
	# the 1:length(...) loop indices to seq_len().
	# l(x+1), padded with 0 beyond the last tabulated age
	lxplus <- c(object@lx[-1], 0)
	Dx <- object@lx * (1 + object@interest)^(-object@x)
	lnDx <- length(Dx)
	dx <- object@lx - lxplus
	Cx <- dx * (1 + object@interest)^(-object@x - 1)
	# backward cumulative sums of Dx
	Nx <- numeric(lnDx)
	for (i in seq_len(lnDx)) Nx[i] <- sum(Dx[i:lnDx])
	Mx <- Dx - (object@interest / (1 + object@interest)) * Nx
	# backward cumulative sums of Mx
	Rx <- numeric(lnDx)
	for (i in seq_len(lnDx)) Rx[i] <- sum(Mx[i:lnDx])
	out <- data.frame(x = object@x, lx = object@lx, Dx = Dx, Nx = Nx, Cx = Cx,
			Mx = Mx, Rx = Rx)
	rownames(out) <- NULL
	return(out)
}
setMethod("show","actuarialtable", #metodo show
function(object){
out<-NULL
cat(paste("Actuarial table ",object@name, "interest rate ", object@interest*100,"%"),"\n")
cat("\n")
#create the actuarial table object
out<-.createActuarialTableCols(object=object)
print(out)
cat("\n")
}
)
#print method: show clone
setMethod("print","actuarialtable", #metodo show
function(x){
out<-NULL
cat(paste("Actuarial table ",x@name, "interest rate ",
x@interest*100,"%"),"\n")
cat("\n")
#create the actuarial table object
out<-.createActuarialTableCols(object=x)
print(out)
cat("\n")
}
)
setMethod("plot","lifetable",
function(x,y,...){
plot(x=x@x, y=x@lx, xlab="x values",
ylab="population at risk",
main=paste("life table",x@name),...)
}
)
#saves lifeTableObj as data frame
setAs("lifetable","data.frame",
function(from){
out<-.createLifeTableCols(object=from)
return(out)
}
)
#get a data.frame containing x and lx and returns a new lifetable object
setAs(from="data.frame",to="lifetable",
def=function(from){
if(any(is.na(match(c("x","lx"), names(from))))) stop("Error! Both x and lx columns required!")
from<-from[complete.cases(from),]
out<-new("lifetable",x=from$x, lx=from$lx, name="COERCED")
return(out)
}
)
#saves actuarialtable as data frame (have same slots as life - table)
setAs("actuarialtable","data.frame",
function(from){
out<-.createActuarialTableCols(object=from)
return(out)
}
)
#coerce methods to numeric
setAs("lifetable","numeric",
function(from) {
out<-numeric(getOmega(from)+1)
for(i in 0:getOmega(from)) out[i+1]<-qxt(object=from,x=i,t=1)
return(out)
}
)
setAs("actuarialtable","numeric",
function(from) {
out<-numeric(getOmega(from))
for(i in 0:(getOmega(from)-2)) out[i+1]<-Axn(actuarialtable=from,x=i)
return(out)
}
)
#demographic classes and methods
#setGeneric("pxt", function(object) standardGeneric("pxt"))
#setGeneric("qxt", function(object) standardGeneric("qxt"))
|
# Read in the data from flat file,
# filtered to only load the rows matching date condition.
# Prefiltering with shell pipe is much more space efficient
# than loading the whole file followed by subsetting the frame.
# NOTE(review): relies on head/grep being available -- assumes a Unix shell.
data <- read.delim(pipe("head -n1 household_power_consumption.txt;
grep '^[1-2]/2/2007' household_power_consumption.txt")
, sep = ";"
, stringsAsFactors = FALSE
, na.strings = "?")
# Combine the separate Date and Time columns into a single POSIXct called datetime
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
# Drop the original Date and Time columns
data <- subset(data, select = -c(Date, Time) )
# Open plot device so we will write to a PNG file
png("plot4.png", width=480, height=480)
# Set 2x2 plot grid; mfcol fills column-by-column, hence the
# upper-left, lower-left, upper-right, lower-right order below
par(mfcol=c(2,2))
# Plot 1 (upper left)
with(data, plot(datetime, Global_active_power, type="l", ylab="Global Active Power", xlab=""))
# Plot 2 (lower left): three sub-metering series overlaid on one axis
with(data, plot(datetime, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="", col="black"))
with(data, lines(datetime, Sub_metering_2, type="l", ylab="Energy sub metering", xlab="", col="red"))
with(data, lines(datetime, Sub_metering_3, type="l", ylab="Energy sub metering", xlab="", col="blue"))
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1,1,1), col=c("black","red","blue"), bty="n")
# Plot 3 (upper right)
with(data, plot(datetime, Voltage, type="l"))
# Plot 4 (lower right)
with(data, plot(datetime, Global_reactive_power, type="l"))
# Close the plot device (write the png file)
dev.off()
|
/plot4.R
|
no_license
|
Josholith/ExData_Plotting1
|
R
| false
| false
| 1,649
|
r
|
# Read in the data from flat file,
# filtered to only load the rows matching date condition.
# Prefiltering with shell pipe is much more space efficient
# than loading the whole file followed by subsetting the frame.
data <- read.delim(pipe("head -n1 household_power_consumption.txt;
grep '^[1-2]/2/2007' household_power_consumption.txt")
, sep = ";"
, stringsAsFactors = FALSE
, na.strings = "?")
# Combine the separate Date and Time columns into a single POSIXct called datetime
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
# Drop the original Date and Time columns
data <- subset(data, select = -c(Date, Time) )
# Open plot device so we will write to a PNG file
png("plot4.png", width=480, height=480)
# Set 2x2 plot grid
par(mfcol=c(2,2))
# Plot 1 (upper left)
with(data, plot(datetime, Global_active_power, type="l", ylab="Global Active Power", xlab=""))
# Plot 2 (lower left)
with(data, plot(datetime, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="", col="black"))
with(data, lines(datetime, Sub_metering_2, type="l", ylab="Energy sub metering", xlab="", col="red"))
with(data, lines(datetime, Sub_metering_3, type="l", ylab="Energy sub metering", xlab="", col="blue"))
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1,1,1), col=c("black","red","blue"), bty="n")
# Plot 3 (upper right)
with(data, plot(datetime, Voltage, type="l"))
# Plot 4 (lower right)
with(data, plot(datetime, Global_reactive_power, type="l"))
# Close the plot device (write the png file)
dev.off()
|
/Stepic_2_Week_3_Practice.R
|
no_license
|
SENSBoD/Stepic_Basics-of-statistics
|
R
| false
| false
| 8,554
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/androidenterprise_functions.R
\name{collections.update}
\alias{collections.update}
\title{Updates a collection.}
\usage{
collections.update(Collection, enterpriseId, collectionId)
}
\arguments{
\item{Collection}{The \link{Collection} object to pass to this method}
\item{enterpriseId}{The ID of the enterprise}
\item{collectionId}{The ID of the collection}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/androidenterprise
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/androidenterprise"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/android/work/play/emm-api}{Google Documentation}
Other Collection functions: \code{\link{Collection}},
\code{\link{collections.insert}},
\code{\link{collections.patch}}
}
|
/googleandroidenterprisev1.auto/man/collections.update.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,091
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/androidenterprise_functions.R
\name{collections.update}
\alias{collections.update}
\title{Updates a collection.}
\usage{
collections.update(Collection, enterpriseId, collectionId)
}
\arguments{
\item{Collection}{The \link{Collection} object to pass to this method}
\item{enterpriseId}{The ID of the enterprise}
\item{collectionId}{The ID of the collection}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/androidenterprise
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/androidenterprise"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/android/work/play/emm-api}{Google Documentation}
Other Collection functions: \code{\link{Collection}},
\code{\link{collections.insert}},
\code{\link{collections.patch}}
}
|
#Assignment 7.3
#1. Create a box and whisker plot by class using mtcars dataset.
#Solution:
# Boxplot of mpg grouped by cylinder count, with custom title/colors
boxplot(mpg~cyl, data=mtcars,main= toupper("Fuel Consumption"), font.main=3,col= topo.colors(3), xlab="Number of Cylinders",
ylab="Miles per Gallon")
#or
# Equivalent plot with default styling
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data",xlab="Number of Cylinders", ylab="Miles Per Gallon")
|
/assignment-7.3.r
|
no_license
|
jaswanthkotni/assignment-7
|
R
| false
| false
| 373
|
r
|
#Assignment 7.3
#1. Create a box and whisker plot by class using mtcars dataset.
#Solution:
boxplot(mpg~cyl, data=mtcars,main= toupper("Fuel Consumption"), font.main=3,col= topo.colors(3), xlab="Number of Cylinders",
ylab="Miles per Gallon")
#or
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data",xlab="Number of Cylinders", ylab="Miles Per Gallon")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sweave_report_pathway.R
\name{CreatePathAnalDoc}
\alias{CreatePathAnalDoc}
\title{Create report of analyses (Met Pathway)}
\usage{
CreatePathAnalDoc(mSetObj = NA)
}
\arguments{
\item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)}
}
\description{
Report generation using Sweave
Metabolomic pathway analysis
Create pathway analysis doc
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
/man/CreatePathAnalDoc.Rd
|
permissive
|
xia-lab/MetaboAnalystR
|
R
| false
| true
| 534
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sweave_report_pathway.R
\name{CreatePathAnalDoc}
\alias{CreatePathAnalDoc}
\title{Create report of analyses (Met Pathway)}
\usage{
CreatePathAnalDoc(mSetObj = NA)
}
\arguments{
\item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)}
}
\description{
Report generation using Sweave
Metabolomic pathway analysis
Create pathway analysis doc
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
# Loesche ALLES
rm( list = ls( ) )
#setwd( "Juliane/Desktop/pv0116_neu/" )
setwd( "~/LIFE/life-for-postgraduates/JulianeWilz/data/" )
library(readxl)
library( dplyr )
library( ggplot2 )
library( GGally )
library( Hmisc )
library(dplyr)
library( openxlsx )
#daten <- read_excel( "PV0116_Arbeitsversion.xlsx" )
daten <- read_excel( "PV0116_GesamtJoin.xlsx" )
df <- daten[ !is.na( daten$CT_S_1_NUM_VALUE ), ]
diseases <- c( "C_DISEASE_TX_SD_ALLG", "C_DISEASE_TX_SD_HYPER",
"C_DISEASE_TX_SD_HYPO", "C_DISEASE_TX_VITD", "C_DISEASE_TX_DM1", "C_DISEASE_TX_DM2",
"C_DISEASE_TX_BLUT", "C_DISEASE_TX_GERIN",
"C_DISEASE_TX_NEPHRO", "C_DISEASE_TX_NIERENFEHL",
"C_DISEASE_TX_ANGIO", "C_DISEASE_TX_DEPRES", "C_DISEASE_TX_SUCHT",
"C_DISEASE_TX_MUSKEL", "C_DISEASE_TX_GIT", "C_DISEASE_TX_KARDIO",
"C_DISEASE_TX_KARDIO_RHYTH")
#diseases1 <- c( "C_DISEASE_TX_SD_ALLG", "C_DISEASE_TX_SD_HYPER",
#"C_DISEASE_TX_SD_HYPO", "C_DISEASE_TX_VITD", "C_DISEASE_TX_DM1", "C_DISEASE_TX_DM2",
#"C_DISEASE_TX_BLUT", "C_DISEASE_TX_GERIN",
#"C_DISEASE_TX_NEPHRO", "C_DISEASE_TX_NIERENFEHL",
#"C_DISEASE_TX_ANGIO", "C_DISEASE_TX_DEPRES", "C_DISEASE_TX_SUCHT",
#"C_DISEASE_TX_MUSKEL")
medik <- c( "CHILD_MED_H_METFORMIN", "CHILD_MED_H_INSULIN", "CHILD_MED_H_LTHYROX",
"CHILD_MED_H_HORMONE", "CHILD_MED_H_SEX_STEROIDE", "CHILD_MED_H_WACHSTUM",
"CHILD_MED_H_DESMOPRESS", "CHILD_MED_H_GLUCO_CORT",
"CHILD_MED_H_TESTO", "CHILD_MED_H_ASTHMA")
# Count the entries of a row that equal exactly 1 (NAs do not count).
summ <-
	function( curr.row ) {
		length(which(curr.row == 1))
	}
mdk.dss <-
c( medik, diseases )
# Das ist die neue Funktion, die Dir alle Mediks und Diseases rausfiltert und die NAs zu auf 0 setzt
# Replace NA with 0 in the given indicator columns, add a helper column
# sum.of.ones counting how many of those columns equal 1 per row, and keep
# only the rows where none of them is 1 (subjects without any recorded
# disease/medication flag).
#
# Bug fix: the function previously ignored its dat.frm argument and operated
# on the global data frame `df` instead; it now uses dat.frm throughout.
filter.for.reasonable.values <-
	function( dat.frm, interesting.cols ) {
		# treat a missing flag as 0 (no disease/medication recorded)
		for (col in interesting.cols) {
			vals <- dat.frm[[col]]
			vals[is.na(vals)] <- 0
			dat.frm[[col]] <- vals
		}
		# per-row count of columns flagged with 1
		dat.frm$sum.of.ones <-
			apply(
				dat.frm[ , interesting.cols, drop = FALSE ],
				1,
				function(r) sum(!is.na(r) & r == 1)
			)
		# keep only rows with no flagged column; sum.of.ones stays in the
		# result, as in the original implementation
		dat.frm[ dat.frm$sum.of.ones < 1, ]
	}
df <-
filter.for.reasonable.values( df, mdk.dss )
View( df[ c( "CT_S_1_SIC", "CT_S_1_GRUPPE", mdk.dss ) ] )
# ab hier haste nur noch "gesunde" Leute, die "keine Medikamente" eingenommen haben
# NOTE(fix): this line previously started with the bare symbol
# `nichtausgeschlossen`, which R would evaluate and abort the script with
# "object 'nichtausgeschlossen' not found". The (already commented-out and
# incomplete) exclusion list is kept below purely as a comment.
# nichtausgeschlossen / Hüftdysplasie:
#Hüftdysplasie <- c("7D32247AE1A2_3M","5E726880B3A2_3M","9455D30FA4A2_3M","6A4A9D2DE4A2_3M", "9455D30FA4A2_01","07E300BEFAA2_01"
#"4293018CFFA2_6M","57145BC607A2_01","8D7AB20F9CA2_3M","C4280BA5C4A2_3M","63B6F4042BA2_3M","57145BC607A2_6M")
Ikterus <- c("F253BA9FDCA2-SK_01", "40EF9053CDA2_3M","27F3AAFDF8A2-SK_01","4C1C477A09A2_3M","72AC41C68FA2_3M","2D14EEAF9EA2_3M",
"7F5A47DEFAA2_01", "B6FC81B271A2_3M","38CF322948A2_3M","8641A70968A2_3M")
Niere <- c("CAA1484CF0A2_04", "2DC3B0ADE2A2_11","B5286CD3F9A2_15","7179E63ED5A2_06","081940C4D5A2_10","BF1ADED87CA2_3M","69FEBB9A07A2_08",
"DFEA8BF395A2_09","DADED57804A2_07","3E0513BAA2A2_16","FBC647BF53A2_01","ED731EADAEA2_11", "E16FC4DFB2A2_3M")
GIT <- c("3FB83E76EDA2-SK_05")
Ausreisser <- c("FC11C38221A2_3M")
Ritalin <- c("3234E27F42B1_16","3F37D4C832A2_11","5F56BA3A4AB1_12","8504D6FB2EA2_13","E72CDACCBAA2_15","8504D6FB2EA2_12","03419A0DEAA2-SK_01",
"8504D6FB2EA2_14" )
MuskErkrankung <- c("4F267F4F54A2_16","BF6CDD9F6FA2_10","B984F164F8A2_01")
Pantoprazol <- c("32A73968C0A2_15", "F5936AD536A2_15", "840D327FDAA2_14", "9C3E0A5C7AA2_14", "E7E6C4D7F9A2_15")
SICGROUP <- c( GIT, Ikterus, Ritalin, MuskErkrankung, Pantoprazol, Ausreisser )
df1 <- df[!paste0( df$CT_S_1_SIC, df$CT_S_1_GRUPPE) %in% SICGROUP, ]
ASD <- c("F4D84480C7" )
VSD <- c("19BD6A6AAC","37A2BC8419","63820C4B8E","8641A70968","8E72CCA696","C4C48F737A")
Mitral <- c("E7B754719C","1D1327997B","072C7C1912","27CC9E6D64", "46CB460261",
"90F008C440", "B383040016")
Aorten <- c("28CCC778A4", "3F3B78A16D", "E8FA497455")
mehrereHerzkrankheiten <- c("28CCC778A4")
MMeulengracht<- c("E60A583B4A", "D1E1792725","228CA1C40E", "EACDCF372D" )
Niere <- c("CAA1484CF0", "2DC3B0ADE2","B5286CD3F9","7179E63ED5","081940C4D5","BF1ADED87C","69FEBB9A07",
"DFEA8BF395","DADED57804","3E0513BAA2","FBC647BF53","ED731EADAE", "E16FC4DFB2")
Knochen <- c("7BF6EFE2CD", "87691882D4","9C4A6A31B5","D22C464F28","353A8B3870","7D09FB4586","4B58B02900",
"6C0AC81AE7","76C48EF23B","53225E8183","C0A44D788A","5D17742E68","9E7C2CF446","7F8C935C88","8D37A6E155",
"2B0240A66D","0FB7E114F6","5BC70742CE","29E70F2C61","6634694A7C","20D4416103" ,"710EA2D8A3","DDAF42DD68",
"0EE9B0EFD2","6622F5E746","2B9F681AB3","F9F266BDB8", "A5543137CC")
Gehirnschädigung <- c("08BC3D640D", "0A7F789EFB","08BC3D640D")
Sonstiges <- c("278D8061B2", "DC395B3CC8","B03385C1E3","2542D1625C","7B4E0CDAE8","A0EC5EB277","F3BE3660F0",
"AC2DB1F248","FDC849B4BF","238AF1D07C","9249323B02","79FD6D1603","86CCE3AE09", "3495EE330C","3495EE330C",
"0677B362C6","4880CB93E3","674CF37825","D4C93A0446","713D6B6534","35D5BA9E5E","7B54C4FAEE",
"BC26147BEC","0A7F789EFB","844E26F4B0", "6553E9B0FF")
Hormone <- c("A32701D433","1708D266B1","FA8AD8BD1D",
"EED92F1F55","28EFA1AD5A","B357E9BB61","A290507BB8","2D0EF351A6","31851D333F",
"EB9B84517C","99F6DA6C37","9BB155D7D0","16A74EFEF4","65BBC9638A","3BFB8F8246")
maligneErkrankung <- c("58E7E02A9E", "3870E289B1")
Wachstumsretardierung <- c("BF00616007","E1A9F2741D")
BipolareStoerung <- c("4F267F4F54")
SICS.alle <- c(ASD, VSD, Mitral, Aorten, mehrereHerzkrankheiten, MMeulengracht, Knochen, Niere, Gehirnschädigung, Sonstiges, Hormone, Wachstumsretardierung, maligneErkrankung, BipolareStoerung)
neue.Tabelle <- df1[ !df1$CT_S_1_SIC %in% SICS.alle, ]
write.xlsx( x = neue.Tabelle, file = "AktuelleTabelle220517excel.xlsx" )
|
/JulianeWilz/r/NeueFilterung220517.R
|
no_license
|
TPeschel/life-for-postgraduates
|
R
| false
| false
| 6,292
|
r
|
# Loesche ALLES
rm( list = ls( ) )
#setwd( "Juliane/Desktop/pv0116_neu/" )
setwd( "~/LIFE/life-for-postgraduates/JulianeWilz/data/" )
library(readxl)
library( dplyr )
library( ggplot2 )
library( GGally )
library( Hmisc )
library(dplyr)
library( openxlsx )
#daten <- read_excel( "PV0116_Arbeitsversion.xlsx" )
daten <- read_excel( "PV0116_GesamtJoin.xlsx" )
df <- daten[ !is.na( daten$CT_S_1_NUM_VALUE ), ]
diseases <- c( "C_DISEASE_TX_SD_ALLG", "C_DISEASE_TX_SD_HYPER",
"C_DISEASE_TX_SD_HYPO", "C_DISEASE_TX_VITD", "C_DISEASE_TX_DM1", "C_DISEASE_TX_DM2",
"C_DISEASE_TX_BLUT", "C_DISEASE_TX_GERIN",
"C_DISEASE_TX_NEPHRO", "C_DISEASE_TX_NIERENFEHL",
"C_DISEASE_TX_ANGIO", "C_DISEASE_TX_DEPRES", "C_DISEASE_TX_SUCHT",
"C_DISEASE_TX_MUSKEL", "C_DISEASE_TX_GIT", "C_DISEASE_TX_KARDIO",
"C_DISEASE_TX_KARDIO_RHYTH")
#diseases1 <- c( "C_DISEASE_TX_SD_ALLG", "C_DISEASE_TX_SD_HYPER",
#"C_DISEASE_TX_SD_HYPO", "C_DISEASE_TX_VITD", "C_DISEASE_TX_DM1", "C_DISEASE_TX_DM2",
#"C_DISEASE_TX_BLUT", "C_DISEASE_TX_GERIN",
#"C_DISEASE_TX_NEPHRO", "C_DISEASE_TX_NIERENFEHL",
#"C_DISEASE_TX_ANGIO", "C_DISEASE_TX_DEPRES", "C_DISEASE_TX_SUCHT",
#"C_DISEASE_TX_MUSKEL")
medik <- c( "CHILD_MED_H_METFORMIN", "CHILD_MED_H_INSULIN", "CHILD_MED_H_LTHYROX",
"CHILD_MED_H_HORMONE", "CHILD_MED_H_SEX_STEROIDE", "CHILD_MED_H_WACHSTUM",
"CHILD_MED_H_DESMOPRESS", "CHILD_MED_H_GLUCO_CORT",
"CHILD_MED_H_TESTO", "CHILD_MED_H_ASTHMA")
# Count the entries of a row that equal exactly 1 (NAs do not count).
summ <-
	function( curr.row ) {
		length(which(curr.row == 1))
	}
mdk.dss <-
c( medik, diseases )
# Das ist die neue Funktion, die Dir alle Mediks und Diseases rausfiltert und die NAs zu auf 0 setzt
# Replace NA with 0 in the given indicator columns, add a helper column
# sum.of.ones counting how many of those columns equal 1 per row, and keep
# only the rows where none of them is 1 (subjects without any recorded
# disease/medication flag).
#
# Bug fix: the function previously ignored its dat.frm argument and operated
# on the global data frame `df` instead; it now uses dat.frm throughout.
filter.for.reasonable.values <-
	function( dat.frm, interesting.cols ) {
		# treat a missing flag as 0 (no disease/medication recorded)
		for (col in interesting.cols) {
			vals <- dat.frm[[col]]
			vals[is.na(vals)] <- 0
			dat.frm[[col]] <- vals
		}
		# per-row count of columns flagged with 1
		dat.frm$sum.of.ones <-
			apply(
				dat.frm[ , interesting.cols, drop = FALSE ],
				1,
				function(r) sum(!is.na(r) & r == 1)
			)
		# keep only rows with no flagged column; sum.of.ones stays in the
		# result, as in the original implementation
		dat.frm[ dat.frm$sum.of.ones < 1, ]
	}
df <-
filter.for.reasonable.values( df, mdk.dss )
View( df[ c( "CT_S_1_SIC", "CT_S_1_GRUPPE", mdk.dss ) ] )
# ab hier haste nur noch "gesunde" Leute, die "keine Medikamente" eingenommen haben
# NOTE(fix): this line previously started with the bare symbol
# `nichtausgeschlossen`, which R would evaluate and abort the script with
# "object 'nichtausgeschlossen' not found". The (already commented-out and
# incomplete) exclusion list is kept below purely as a comment.
# nichtausgeschlossen / Hüftdysplasie:
#Hüftdysplasie <- c("7D32247AE1A2_3M","5E726880B3A2_3M","9455D30FA4A2_3M","6A4A9D2DE4A2_3M", "9455D30FA4A2_01","07E300BEFAA2_01"
#"4293018CFFA2_6M","57145BC607A2_01","8D7AB20F9CA2_3M","C4280BA5C4A2_3M","63B6F4042BA2_3M","57145BC607A2_6M")
Ikterus <- c("F253BA9FDCA2-SK_01", "40EF9053CDA2_3M","27F3AAFDF8A2-SK_01","4C1C477A09A2_3M","72AC41C68FA2_3M","2D14EEAF9EA2_3M",
"7F5A47DEFAA2_01", "B6FC81B271A2_3M","38CF322948A2_3M","8641A70968A2_3M")
Niere <- c("CAA1484CF0A2_04", "2DC3B0ADE2A2_11","B5286CD3F9A2_15","7179E63ED5A2_06","081940C4D5A2_10","BF1ADED87CA2_3M","69FEBB9A07A2_08",
"DFEA8BF395A2_09","DADED57804A2_07","3E0513BAA2A2_16","FBC647BF53A2_01","ED731EADAEA2_11", "E16FC4DFB2A2_3M")
GIT <- c("3FB83E76EDA2-SK_05")
Ausreisser <- c("FC11C38221A2_3M")
Ritalin <- c("3234E27F42B1_16","3F37D4C832A2_11","5F56BA3A4AB1_12","8504D6FB2EA2_13","E72CDACCBAA2_15","8504D6FB2EA2_12","03419A0DEAA2-SK_01",
"8504D6FB2EA2_14" )
MuskErkrankung <- c("4F267F4F54A2_16","BF6CDD9F6FA2_10","B984F164F8A2_01")
Pantoprazol <- c("32A73968C0A2_15", "F5936AD536A2_15", "840D327FDAA2_14", "9C3E0A5C7AA2_14", "E7E6C4D7F9A2_15")
SICGROUP <- c( GIT, Ikterus, Ritalin, MuskErkrankung, Pantoprazol, Ausreisser )
df1 <- df[!paste0( df$CT_S_1_SIC, df$CT_S_1_GRUPPE) %in% SICGROUP, ]
ASD <- c("F4D84480C7" )
VSD <- c("19BD6A6AAC","37A2BC8419","63820C4B8E","8641A70968","8E72CCA696","C4C48F737A")
Mitral <- c("E7B754719C","1D1327997B","072C7C1912","27CC9E6D64", "46CB460261",
"90F008C440", "B383040016")
Aorten <- c("28CCC778A4", "3F3B78A16D", "E8FA497455")
mehrereHerzkrankheiten <- c("28CCC778A4")
MMeulengracht<- c("E60A583B4A", "D1E1792725","228CA1C40E", "EACDCF372D" )
Niere <- c("CAA1484CF0", "2DC3B0ADE2","B5286CD3F9","7179E63ED5","081940C4D5","BF1ADED87C","69FEBB9A07",
"DFEA8BF395","DADED57804","3E0513BAA2","FBC647BF53","ED731EADAE", "E16FC4DFB2")
Knochen <- c("7BF6EFE2CD", "87691882D4","9C4A6A31B5","D22C464F28","353A8B3870","7D09FB4586","4B58B02900",
"6C0AC81AE7","76C48EF23B","53225E8183","C0A44D788A","5D17742E68","9E7C2CF446","7F8C935C88","8D37A6E155",
"2B0240A66D","0FB7E114F6","5BC70742CE","29E70F2C61","6634694A7C","20D4416103" ,"710EA2D8A3","DDAF42DD68",
"0EE9B0EFD2","6622F5E746","2B9F681AB3","F9F266BDB8", "A5543137CC")
Gehirnschädigung <- c("08BC3D640D", "0A7F789EFB","08BC3D640D")
Sonstiges <- c("278D8061B2", "DC395B3CC8","B03385C1E3","2542D1625C","7B4E0CDAE8","A0EC5EB277","F3BE3660F0",
"AC2DB1F248","FDC849B4BF","238AF1D07C","9249323B02","79FD6D1603","86CCE3AE09", "3495EE330C","3495EE330C",
"0677B362C6","4880CB93E3","674CF37825","D4C93A0446","713D6B6534","35D5BA9E5E","7B54C4FAEE",
"BC26147BEC","0A7F789EFB","844E26F4B0", "6553E9B0FF")
Hormone <- c("A32701D433","1708D266B1","FA8AD8BD1D",
"EED92F1F55","28EFA1AD5A","B357E9BB61","A290507BB8","2D0EF351A6","31851D333F",
"EB9B84517C","99F6DA6C37","9BB155D7D0","16A74EFEF4","65BBC9638A","3BFB8F8246")
maligneErkrankung <- c("58E7E02A9E", "3870E289B1")
Wachstumsretardierung <- c("BF00616007","E1A9F2741D")
BipolareStoerung <- c("4F267F4F54")
SICS.alle <- c(ASD, VSD, Mitral, Aorten, mehrereHerzkrankheiten, MMeulengracht, Knochen, Niere, Gehirnschädigung, Sonstiges, Hormone, Wachstumsretardierung, maligneErkrankung, BipolareStoerung)
neue.Tabelle <- df1[ !df1$CT_S_1_SIC %in% SICS.alle, ]
write.xlsx( x = neue.Tabelle, file = "AktuelleTabelle220517excel.xlsx" )
|
source('general_script.R')
#Extracting data
# fips "24510" selects Baltimore City, Maryland (see plot title below).
# NOTE(review): NEI is assumed to be created by general_script.R -- confirm.
subsetNEI <- NEI[NEI$fips=="24510", ]
# total PM2.5 emissions per year and source type
aggregatedTotalByYearAndType <- aggregate(Emissions ~ year + type, subsetNEI, sum)
#Plotting
png("plot3.png", width=640, height=480)
g <- ggplot(aggregatedTotalByYearAndType, aes(year, Emissions, color = type))
g <- g +
geom_line() +
xlab("year") +
ylab("total PM2.5 emission") +
ggtitle(expression( atop("Total Emissions in Baltimore City, Maryland from 1999 to 2008"
, atop(italic("Only the POINT-type shows an increase over the years"), "")))) +
theme( axis.text.x = element_text(angle=-45, hjust=0, vjust=1)
, plot.title = element_text(size = 15, face = "bold", colour = "black", vjust = -1)
)
print(g)
dev.off()
|
/plot3.R
|
no_license
|
rlondt/ExploratoryDataAnalysisProject2
|
R
| false
| false
| 742
|
r
|
source('general_script.R')
#Extracting data
subsetNEI <- NEI[NEI$fips=="24510", ]
aggregatedTotalByYearAndType <- aggregate(Emissions ~ year + type, subsetNEI, sum)
#Plotting
png("plot3.png", width=640, height=480)
g <- ggplot(aggregatedTotalByYearAndType, aes(year, Emissions, color = type))
g <- g +
geom_line() +
xlab("year") +
ylab("total PM2.5 emission") +
ggtitle(expression( atop("Total Emissions in Baltimore City, Maryland from 1999 to 2008"
, atop(italic("Only the POINT-type shows an increase over the years"), "")))) +
theme( axis.text.x = element_text(angle=-45, hjust=0, vjust=1)
, plot.title = element_text(size = 15, face = "bold", colour = "black", vjust = -1)
)
print(g)
dev.off()
|
\name{setup_called}
\alias{setup_called}
\title{
Sets up Statcast data to obtain called pitches
}
\description{
Sets up Statcast data to obtain called pitches
}
\usage{
setup_called(sc)
}
\arguments{
\item{sc}{
Statcast data frame
}
}
\value{
Statcast data frame for only the called pitches with new variables Strike and Count
}
\author{
Jim Albert
}
|
/man/setup_called.Rd
|
no_license
|
bayesball/CalledStrike
|
R
| false
| false
| 372
|
rd
|
\name{setup_called}
\alias{setup_called}
\title{
Sets up Statcast data to obtain called pitches
}
\description{
Sets up Statcast data to obtain called pitches
}
\usage{
setup_called(sc)
}
\arguments{
\item{sc}{
Statcast data frame
}
}
\value{
Statcast data frame for only the called pitches with new variables Strike and Count
}
\author{
Jim Albert
}
|
# plot2.R — bar chart of total PM2.5 emissions per year for Baltimore City
# (fips == "24510"), all emission sources combined.
#firstly read data. data should be in the current working directory
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is read but never used in this plot — kept for
# consistency with the sibling plot scripts, presumably.
SCC <- readRDS("Source_Classification_Code.rds")
#subset for the Baltimore City
baltimoreData <- subset(NEI, fips == "24510")
#calculate sum
# tapply groups Emissions by year and sums within each group, yielding a
# named vector suitable for barplot().
totalBaltimore <- tapply(baltimoreData$Emissions, baltimoreData$year, sum)
#draw figure
png("plot2.png")
barplot(totalBaltimore, xlab = "year", ylab= "PM2.5 in tons", main = "Total PM2.5 Emissions for All Sources in Baltimore City")
#close device
dev.off()
|
/plot2.R
|
no_license
|
mikesn922/exdata-data-NEI_data
|
R
| false
| false
| 506
|
r
|
#firstly read data. data should be in the current working directory
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#subset for the Baltimore City
baltimoreData <- subset(NEI, fips == "24510")
#calculate sum
totalBaltimore <- tapply(baltimoreData$Emissions, baltimoreData$year, sum)
#draw figure
png("plot2.png")
barplot(totalBaltimore, xlab = "year", ylab= "PM2.5 in tons", main = "Total PM2.5 Emissions for All Sources in Baltimore City")
#close device
dev.off()
|
#Chapter - 1 : Introduction
x = rnorm(5)
x
x1 <- rnorm(5)
x1
# I prefer = instead of <-
# List 1.1
age <- c(1,3,5,2,11,9,3,9,12,3)
age
weight <- c(4.4,5.3,7.2,5.2,8.5,7.3,6.0,10.4,10.2,6.1)
weight
mean(weight)
#[1] 7.06
sd(weight)
#[1] 2.077498
cor(age,weight)
#[1] 0.9075655
plot(age,weight)
#q() # quit
demo(graphics)
demo(Hershey)
demo(persp)
demo(image)
demo() # all list of demos
help.start()
help('mean') # function
example('mean')
RSiteSearch('mean')
apropos('mean', mode='function')
data() # List of available example datasets contained in currently loaded packages
vignette() # list of avl vignetters - Intro topics in pdf format. Not all have
#View a specified package vignette, or list the available ones; display it rendered in
vignette('tables')
# Workspace - current R Working eng
# Functions for managing R Workspace
getwd()
#setwd('my dir') # windows use c:\xxx\xx
#dir.create(' ...') # to create directory
ls() # list of objects in current ws
rm(z) # remove an object
help(options) # available options
options() # View or set current options
history(5) # last 5 commands
savehistory('myhistory') # save history to file
loadhistory('myhistory')
save.image('myfile') # Save WS to myfile ( default=.RData)
save(age, weight, file='myfile2') # save objects to file*.RData
load('myfile2')
# setwd(' ')
options(digits=3)
x = runif(20)
summary(x)
hist(x)
savehistory()
save.image()
x
options(digits=4)
x = runif(20)
x
options(digits=1)
x = runif(20)
x
# 1.3.4 Input and Output ----------------------
source('test.R') # submit script to current session
sink('test2.R', append=T, split=T) # output to file
# split=T - output to file and screen, append=T- append output to file
# Functions for saving graphic output
pdf('filename.pdf')
win.metafile('filename.wmf')
png('filename.jpg')
bmp('filename.bmp')
postscript('filename.bmp')
# output to file and screen
sink('test3', append=T, split=T)
pdf('mygraph.pdf')
source('test.R')
sink()
dev.off()
source('test.R') # only to screen
# Packages
.libPaths()
library() # packages in library
search() # which packages are loaded
install.packages('gcplus') # install packages
update.packages() # update packages
library(ggplot2)
help(package='ggplot2')
# Batch Commands in terminal windows
#R CMD BATCH options infile outfile
# Output and Input ---------------
lm(mpg ~ wt, data = mtcars)
lmfit = lm(mpg ~ wt, data = mtcars)
plot(lmfit)
summary(lmfit)
cook = cooks.distance(lmfit)
cook
plot(cook)
head(mtcars)
range(mtcars$wt)
predict(lmfit, newdata = data.frame(wt=3))
# Memory
# 2GB - 100000 observations
# New Package
# Install and explore the vcd (Visualizing Categorical Data) package.
install.packages('vcd')
help(package='vcd')
library(vcd)
# Fixed typo: the vcd dataset is named "Arthritis", not "Arthiritis" — the
# misspelled help() call fails with "No documentation for 'Arthiritis'".
help(Arthritis)
Arthritis
example("Arthritis")
|
/bkRiA/chap01.R
|
no_license
|
dupadhyaya/dspgmsc2017
|
R
| false
| false
| 2,761
|
r
|
#Chapter - 1 : Introduction
x = rnorm(5)
x
x1 <- rnorm(5)
x1
# I prefer = instead of <-
# List 1.1
age <- c(1,3,5,2,11,9,3,9,12,3)
age
weight <- c(4.4,5.3,7.2,5.2,8.5,7.3,6.0,10.4,10.2,6.1)
weight
mean(weight)
#[1] 7.06
sd(weight)
#[1] 2.077498
cor(age,weight)
#[1] 0.9075655
plot(age,weight)
#q() # quit
demo(graphics)
demo(Hershey)
demo(persp)
demo(image)
demo() # all list of demos
help.start()
help('mean') # function
example('mean')
RSiteSearch('mean')
apropos('mean', mode='function')
data() # List of available example datasets contained in currently loaded packages
vignette() # list of avl vignetters - Intro topics in pdf format. Not all have
#View a specified package vignette, or list the available ones; display it rendered in
vignette('tables')
# Workspace - current R Working eng
# Functions for managing R Workspace
getwd()
#setwd('my dir') # windows use c:\xxx\xx
#dir.create(' ...') # to create directory
ls() # list of objects in current ws
rm(z) # remove an object
help(options) # available options
options() # View or set current options
history(5) # last 5 commands
savehistory('myhistory') # save history to file
loadhistory('myhistory')
save.image('myfile') # Save WS to myfile ( default=.RData)
save(age, weight, file='myfile2') # save objects to file*.RData
load('myfile2')
# setwd(' ')
options(digits=3)
x = runif(20)
summary(x)
hist(x)
savehistory()
save.image()
x
options(digits=4)
x = runif(20)
x
options(digits=1)
x = runif(20)
x
# 1.3.4 Input and Output ----------------------
source('test.R') # submit script to current session
sink('test2.R', append=T, split=T) # output to file
# split=T - output to file and screen, append=T- append output to file
# Functions for saving graphic output
pdf('filename.pdf')
win.metafile('filename.wmf')
png('filename.jpg')
bmp('filename.bmp')
postscript('filename.bmp')
# output to file and screen
sink('test3', append=T, split=T)
pdf('mygraph.pdf')
source('test.R')
sink()
dev.off()
source('test.R') # only to screen
# Packages
.libPaths()
library() # packages in library
search() # which packages are loaded
install.packages('gcplus') # install packages
update.packages() # update packages
library(ggplot2)
help(package='ggplot2')
# Batch Commands in terminal windows
#R CMD BATCH options infile outfile
# Output and Input ---------------
lm(mpg ~ wt, data = mtcars)
lmfit = lm(mpg ~ wt, data = mtcars)
plot(lmfit)
summary(lmfit)
cook = cooks.distance(lmfit)
cook
plot(cook)
head(mtcars)
range(mtcars$wt)
predict(lmfit, newdata = data.frame(wt=3))
# Memory
# 2GB - 100000 observations
# New Package
# Install and explore the vcd (Visualizing Categorical Data) package.
install.packages('vcd')
help(package='vcd')
library(vcd)
# Fixed typo: the vcd dataset is named "Arthritis", not "Arthiritis" — the
# misspelled help() call fails with "No documentation for 'Arthiritis'".
help(Arthritis)
Arthritis
example("Arthritis")
|
#### ---- SPARK FUND CASE STUDY ---- ####
# Loading datasets
companies <- read.delim("companies.txt", stringsAsFactors = FALSE)
rounds <- read.csv("rounds2.csv", stringsAsFactors = FALSE)
mapping <- read.csv("mapping.csv", stringsAsFactors = FALSE, check.names = F)
str(companies)
require(dplyr)
glimpse(rounds)
View(mapping)
names(companies)
names(rounds)
names(mapping)
# , check.names = F to keep column names from distorting
# Check NA's
sum(is.na(companies))
sum(is.na(rounds))
sum(is.na(mapping))
colSums(is.na(rounds))
# Check blanks
colSums(companies == "")
colSums(rounds == "")
colSums(mapping == "")
# Table 1 ----
# To avoid error due to case sensitive nature of R
rounds$company_permalink <- tolower(rounds$company_permalink)
companies$permalink <- tolower(companies$permalink)
# Or we can do so in just one go -->>
companies <- sapply(companies, tolower)
companies <- as.data.frame(companies, stringsAsFactors = F)
rounds <- sapply(rounds, tolower)
rounds <- as.data.frame(rounds, stringsAsFactors = F)
# But keep an eye on structure as sapply outupt is matrix
str(rounds)
rounds$raised_amount_usd <- as.numeric(rounds$raised_amount_usd)
str(rounds)
?mutate_if
# Or better use mutate_if of dplyr
mapping <- mutate_if(mapping, is.character, tolower)
str(mapping)
# Q.1 No. of unique companies in rounds
length(unique(rounds$company_permalink))
# 66368
# Q.2 No. of unique companies in companies
k <- distinct(companies, permalink)
count(k)
# 66368
# Q.3 In the companies data frame, which column can be used as the unique key for each company?
# Count distinct values in each column and find out
sapply(companies, n_distinct)
n_distinct(companies$permalink)
# Q.4 Are there any companies in the companies file which are not present in rounds?
sum(!companies$permalink %in% rounds$company_permalink) # The NOT operator -->> "!"
# Q.5 Merge the two data frames as master_frame.
# How many observations are present in master_frame?
master_frame <- merge(rounds, companies, by = 'permalink')
# error because common column name is different in both datasets
# by parameter ???
master_frame <- merge(rounds, companies, by.x = 'company_permalink', by.y = 'permalink')
nrow(master_frame)
summary(master_frame)
# Table 2 ----
# Summarising along funding type to find average funding of each type
type_wise_avg <- master_frame %>% group_by(funding_round_type) %>%
summarise(avg_funding = mean(raised_amount_usd, na.rm = TRUE)) %>%
arrange(avg_funding)
type_wise_avg
# We can see average funding for each of funding types - "venture", "angel", "seed", "private_equity"
# and answer Q.1 to Q.4
# Q.5 The most suitable investment type for Spark.
# (Average funding per investment round between 5 million to 15 million USD)
filter(type_wise_avg, avg_funding > 5000000 & avg_funding < 15000000)
# We find that venture type funding is most suitable for Spark Funds
# having average funding per round between 5 million and 15 million
# Table 3 ----
# Q.1 to Q.3 - Top 3 english speaking countries
# Filter data for venture type funding only
venture_data <- filter(master_frame, funding_round_type == "venture")
# Summarising by country code to find total funding of each country code
country_wise_gp <- venture_data %>% group_by(country_code) %>%
summarise(total_rounds = n(),
total_funding = sum(raised_amount_usd, na.rm = TRUE)) %>%
arrange(desc(total_funding))
# Which to consider for best countries, total_funding or total_rounds ???
head(country_wise_gp, 10)
# From the given list of countries where English is an official language,
# by general convention for country codes, we have -
# Top English speaking country = United States (USA)
# 2nd English speaking country = United Kingdom (GBR)
# 3rd English speaking country = India (IND)
# Keep only the top three English-speaking countries (USA, GBR, IND).
# %in% against a vector of codes is clearer and less error-prone than a
# chain of == comparisons joined with | (easy to typo or to accidentally
# use && on vectors); behavior is identical for these values.
master_frame <- filter(venture_data, country_code %in% c("usa", "gbr", "ind"))
rm(companies, country_wise_gp, rounds, type_wise_avg, venture_data)
# Table 5 ----
# Lets map the business sectors from mapping file.
# Lets see how many match
sum(master_frame$category_list %in% mapping$category_list)
# Lets see how many don't match
sum(!master_frame$category_list %in% mapping$category_list)
# Lets see why so many don't match
master_frame$category_list[!master_frame$category_list %in% mapping$category_list]
# Seems like sub-sectors given after main sector separated by"|"
# This must cause mismatch
library(stringr)
# Splitting category_list column on basis of occurence of "|" into columns
# and assigning 1st column to "primary_sector" column in master_frame
?str_split
sectors <- str_split(master_frame$category_list,"[|]", simplify = T)
?str_split
sectors <- as.data.frame(sectors, stringsAsFactors = FALSE)
master_frame$primary_sector <- sectors$V1
# Lets see mismatch again
sum(!master_frame$primary_sector %in% mapping$category_list)
master_frame$primary_sector[!master_frame$primary_sector %in%
mapping$category_list]
# Lets see vice-versa i.e. which sectors in mapping are not in masterframe
mapping$category_list[!mapping$category_list %in%
master_frame$primary_sector]
# We see that category_list contains distorted names of many categories
# e.g.
# "Natural Language Processing" spelled as "0tural Language Processing"
# "Nanotechnology" spelled as "0notechnology"
# "Natural Resources" spelled as "0tural Resources"
# So here is a common anomaly where somehow pattern "na" is changed to "0"
# in all the strings containing "na"
# Correcting this anomaly
mapping$category_list <- str_replace_all(mapping$category_list,
"0", "na")
# Check again
sum(!master_frame$primary_sector %in% mapping$category_list)
# Few still remain. Lets see which are they
master_frame$primary_sector[!master_frame$primary_sector %in% mapping$category_list]
mapping$category_list[!mapping$category_list %in% master_frame$primary_sector]
# Now, "0" in category name "Enterprise 2.0" is also replaced by "na"
# which was not meant to be replaced. Correfct it.
mapping$category_list <- str_replace_all(mapping$category_list,
"2.na", "2.0")
# Check where all "0" are now.
mapping$category_list[which(str_detect(mapping$category_list, "0"))]
# Anomaly removed.
require(reshape2)
# Convert mapping from wide to long format
?melt
long_mapping <- melt(mapping, id.vars = "category_list",
value.name = "value")
# Cleaning long mapping
new_mapping <- long_mapping[long_mapping$value == 1, ]
# Check for blank
View(new_mapping[new_mapping$category_list =="", ])
# Removing rows with blanks, and 3rd column altogether.
new_mapping <- new_mapping[new_mapping$category_list != "", -3]
colnames(new_mapping)[2] <- 'main_sector'
# Merge master_frame with new_mapping; clean & long form of mapping
# Mapping by outer merge as inner merge is causing loss of data
mapped_master <- merge(master_frame, new_mapping,
by.x = "primary_sector",
by.y = "category_list", all.x = T)
# We have deliberately kept all.x merge because ??
# inner merge is causing loss of data whose main sector is not available
# which may lead to erroneous result (like % of total, total count, total sum etc.)
rm(mapping, long_mapping, new_mapping)
# Now we know -
# Most suitable funding type for Spark Funds - venture
# Top 3 English speaking countries - USA, GBR and IND
# Range of funding preferred by Spark Funds - Between 5 million and 15 million
# 1. Making data frames for each of Top 3 countries with desired filters
usa_df <- filter(mapped_master, country_code == "usa")
usa_gp <- group_by(usa_df, main_sector)
usa_summary <- summarise(usa_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(usa_summary, desc(total_of_investments, no_of_investments))
gbr_df <- filter(mapped_master, country_code == "gbr")
gbr_gp <- group_by(gbr_df, main_sector)
gbr_summary <- summarise(gbr_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(gbr_summary, desc(total_of_investments, no_of_investments))
ind_df <- filter(mapped_master, country_code == "ind")
ind_gp <- group_by(ind_df, main_sector)
ind_summary <- summarise(ind_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(ind_summary, desc(total_of_investments, no_of_investments))
# Table 5 can be answered now.
###### --------- ########### --------- ######
|
/spark fund.R
|
no_license
|
shantam-srivastava/Spark-fund-Case-study
|
R
| false
| false
| 9,082
|
r
|
#### ---- SPARK FUND CASE STUDY ---- ####
# Loading datasets
companies <- read.delim("companies.txt", stringsAsFactors = FALSE)
rounds <- read.csv("rounds2.csv", stringsAsFactors = FALSE)
mapping <- read.csv("mapping.csv", stringsAsFactors = FALSE, check.names = F)
str(companies)
require(dplyr)
glimpse(rounds)
View(mapping)
names(companies)
names(rounds)
names(mapping)
# , check.names = F to keep column names from distorting
# Check NA's
sum(is.na(companies))
sum(is.na(rounds))
sum(is.na(mapping))
colSums(is.na(rounds))
# Check blanks
colSums(companies == "")
colSums(rounds == "")
colSums(mapping == "")
# Table 1 ----
# To avoid error due to case sensitive nature of R
rounds$company_permalink <- tolower(rounds$company_permalink)
companies$permalink <- tolower(companies$permalink)
# Or we can do so in just one go -->>
companies <- sapply(companies, tolower)
companies <- as.data.frame(companies, stringsAsFactors = F)
rounds <- sapply(rounds, tolower)
rounds <- as.data.frame(rounds, stringsAsFactors = F)
# But keep an eye on structure as sapply outupt is matrix
str(rounds)
rounds$raised_amount_usd <- as.numeric(rounds$raised_amount_usd)
str(rounds)
?mutate_if
# Or better use mutate_if of dplyr
mapping <- mutate_if(mapping, is.character, tolower)
str(mapping)
# Q.1 No. of unique companies in rounds
length(unique(rounds$company_permalink))
# 66368
# Q.2 No. of unique companies in companies
k <- distinct(companies, permalink)
count(k)
# 66368
# Q.3 In the companies data frame, which column can be used as the unique key for each company?
# Count distinct values in each column and find out
sapply(companies, n_distinct)
n_distinct(companies$permalink)
# Q.4 Are there any companies in the companies file which are not present in rounds?
sum(!companies$permalink %in% rounds$company_permalink) # The NOT operator -->> "!"
# Q.5 Merge the two data frames as master_frame.
# How many observations are present in master_frame?
master_frame <- merge(rounds, companies, by = 'permalink')
# error because common column name is different in both datasets
# by parameter ???
master_frame <- merge(rounds, companies, by.x = 'company_permalink', by.y = 'permalink')
nrow(master_frame)
summary(master_frame)
# Table 2 ----
# Summarising along funding type to find average funding of each type
type_wise_avg <- master_frame %>% group_by(funding_round_type) %>%
summarise(avg_funding = mean(raised_amount_usd, na.rm = TRUE)) %>%
arrange(avg_funding)
type_wise_avg
# We can see average funding for each of funding types - "venture", "angel", "seed", "private_equity"
# and answer Q.1 to Q.4
# Q.5 The most suitable investment type for Spark.
# (Average funding per investment round between 5 million to 15 million USD)
filter(type_wise_avg, avg_funding > 5000000 & avg_funding < 15000000)
# We find that venture type funding is most suitable for Spark Funds
# having average funding per round between 5 million and 15 million
# Table 3 ----
# Q.1 to Q.3 - Top 3 english speaking countries
# Filter data for venture type funding only
venture_data <- filter(master_frame, funding_round_type == "venture")
# Summarising by country code to find total funding of each country code
country_wise_gp <- venture_data %>% group_by(country_code) %>%
summarise(total_rounds = n(),
total_funding = sum(raised_amount_usd, na.rm = TRUE)) %>%
arrange(desc(total_funding))
# Which to consider for best countries, total_funding or total_rounds ???
head(country_wise_gp, 10)
# From the given list of countries where English is an official language,
# by general convention for country codes, we have -
# Top English speaking country = United States (USA)
# 2nd English speaking country = United Kingdom (GBR)
# 3rd English speaking country = India (IND)
# Keep only the top three English-speaking countries (USA, GBR, IND).
# %in% against a vector of codes is clearer and less error-prone than a
# chain of == comparisons joined with | (easy to typo or to accidentally
# use && on vectors); behavior is identical for these values.
master_frame <- filter(venture_data, country_code %in% c("usa", "gbr", "ind"))
rm(companies, country_wise_gp, rounds, type_wise_avg, venture_data)
# Table 5 ----
# Lets map the business sectors from mapping file.
# Lets see how many match
sum(master_frame$category_list %in% mapping$category_list)
# Lets see how many don't match
sum(!master_frame$category_list %in% mapping$category_list)
# Lets see why so many don't match
master_frame$category_list[!master_frame$category_list %in% mapping$category_list]
# Seems like sub-sectors given after main sector separated by"|"
# This must cause mismatch
library(stringr)
# Splitting category_list column on basis of occurence of "|" into columns
# and assigning 1st column to "primary_sector" column in master_frame
?str_split
sectors <- str_split(master_frame$category_list,"[|]", simplify = T)
?str_split
sectors <- as.data.frame(sectors, stringsAsFactors = FALSE)
master_frame$primary_sector <- sectors$V1
# Lets see mismatch again
sum(!master_frame$primary_sector %in% mapping$category_list)
master_frame$primary_sector[!master_frame$primary_sector %in%
mapping$category_list]
# Lets see vice-versa i.e. which sectors in mapping are not in masterframe
mapping$category_list[!mapping$category_list %in%
master_frame$primary_sector]
# We see that category_list contains distorted names of many categories
# e.g.
# "Natural Language Processing" spelled as "0tural Language Processing"
# "Nanotechnology" spelled as "0notechnology"
# "Natural Resources" spelled as "0tural Resources"
# So here is a common anomaly where somehow pattern "na" is changed to "0"
# in all the strings containing "na"
# Correcting this anomaly
mapping$category_list <- str_replace_all(mapping$category_list,
"0", "na")
# Check again
sum(!master_frame$primary_sector %in% mapping$category_list)
# Few still remain. Lets see which are they
master_frame$primary_sector[!master_frame$primary_sector %in% mapping$category_list]
mapping$category_list[!mapping$category_list %in% master_frame$primary_sector]
# Now, "0" in category name "Enterprise 2.0" is also replaced by "na"
# which was not meant to be replaced. Correfct it.
mapping$category_list <- str_replace_all(mapping$category_list,
"2.na", "2.0")
# Check where all "0" are now.
mapping$category_list[which(str_detect(mapping$category_list, "0"))]
# Anomaly removed.
require(reshape2)
# Convert mapping from wide to long format
?melt
long_mapping <- melt(mapping, id.vars = "category_list",
value.name = "value")
# Cleaning long mapping
new_mapping <- long_mapping[long_mapping$value == 1, ]
# Check for blank
View(new_mapping[new_mapping$category_list =="", ])
# Removing rows with blanks, and 3rd column altogether.
new_mapping <- new_mapping[new_mapping$category_list != "", -3]
colnames(new_mapping)[2] <- 'main_sector'
# Merge master_frame with new_mapping; clean & long form of mapping
# Mapping by outer merge as inner merge is causing loss of data
mapped_master <- merge(master_frame, new_mapping,
by.x = "primary_sector",
by.y = "category_list", all.x = T)
# We have deliberately kept all.x merge because ??
# inner merge is causing loss of data whose main sector is not available
# which may lead to erroneous result (like % of total, total count, total sum etc.)
rm(mapping, long_mapping, new_mapping)
# Now we know -
# Most suitable funding type for Spark Funds - venture
# Top 3 English speaking countries - USA, GBR and IND
# Range of funding preferred by Spark Funds - Between 5 million and 15 million
# 1. Making data frames for each of Top 3 countries with desired filters
usa_df <- filter(mapped_master, country_code == "usa")
usa_gp <- group_by(usa_df, main_sector)
usa_summary <- summarise(usa_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(usa_summary, desc(total_of_investments, no_of_investments))
gbr_df <- filter(mapped_master, country_code == "gbr")
gbr_gp <- group_by(gbr_df, main_sector)
gbr_summary <- summarise(gbr_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(gbr_summary, desc(total_of_investments, no_of_investments))
ind_df <- filter(mapped_master, country_code == "ind")
ind_gp <- group_by(ind_df, main_sector)
ind_summary <- summarise(ind_gp, total_of_investments = sum(raised_amount_usd, na.rm = T),
no_of_investments = n())
arrange(ind_summary, desc(total_of_investments, no_of_investments))
# Table 5 can be answered now.
###### --------- ########### --------- ######
|
# Libraries ----
library(shiny)
library(shinydashboard)
library(ggplot2)
library(tidytext) # for transforming text
library(dplyr) # for data wrangling
library(ggwordcloud) # to render wordclouds
# default text
demo_text <- ""
# . dashboardPage ----
ui <- dashboardPage(
dashboardHeader(title = "Congratulations!"),
dashboardSidebar(
textAreaInput("theText",
"Enter text here",
value = demo_text,
height = "200px"),
sliderInput("minWord", "Omit words that occur less than", 1, 20, 3),
sliderInput("topWords", "Maximum number of words to include", 1, 100, 100),
sliderInput("maxTextSize", "Maximum text size", 10, 100, 40),
sliderInput("plotHeight", "Aspect Ratio 1:", 0.5, 2, 1, .05),
selectInput("colourScheme", "Colour Scheme",
list(
"Default" = 1,
"Viridis"= 2,
"Magma" = 3,
"Inferno" = 4,
"Plasma" = 5,
"PsyTeachR" = 6
),
selected = 1
),
HTML("Download the <a href='https://github.com/debruine/shiny_apps/tree/master/wordcloud'>app code here<a/>.")
),
dashboardBody(
plotOutput("wordCloudPlot")
),
title = "Congratulations!"
)
# Define server logic ----
server <- function(input, output, session) {
output$wordCloudPlot <- renderPlot({
# process the text
text_table <- tibble(text = input$theText)
# this is better than the next two lines, but requires tidytext,
# which isn't installed on our shiny server
word_table <- unnest_tokens(text_table, "word", "text") %>%
#words <- input$theText %>% tolower() %>% strsplit("[^a-zA-Z]+")
#word_table <- tibble(word = words[[1]]) %>%
count(word) %>%
filter(n >= input$minWord) %>%
arrange(desc(n)) %>%
head(input$topWords)
# create the ggwordcloud
thePlot <- ggplot(word_table, aes(label = word, color = word, size = n)) +
geom_text_wordcloud_area() +
scale_size_area(max_size = input$maxTextSize) +
theme_minimal()
# set the colour scheme
if (input$colourScheme == 2) {
thePlot <- thePlot + scale_colour_viridis_d()
} else if (input$colourScheme == 3) {
thePlot <- thePlot + scale_colour_viridis_d(option = "A")
} else if (input$colourScheme == 4) {
thePlot <- thePlot + scale_colour_viridis_d(option = "B")
} else if (input$colourScheme == 5) {
thePlot <- thePlot + scale_colour_viridis_d(option = "C")
} else if (input$colourScheme == 6) {
ptrc <- c("#983E82","#E2A458","#F5DC70","#59935B","#467AAC","#61589C")
palette <- rep(ptrc, length.out = nrow(word_table))
thePlot <- thePlot + scale_colour_manual(values = palette)
}
thePlot
},
width = "auto",
height = function() {
session$clientData$output_wordCloudPlot_width*input$plotHeight
})
}
shinyApp(ui, server)
|
/congrats/app.R
|
no_license
|
IharValovich/shiny_apps
|
R
| false
| false
| 2,976
|
r
|
# Libraries ----
library(shiny)
library(shinydashboard)
library(ggplot2)
library(tidytext) # for transforming text
library(dplyr) # for data wrangling
library(ggwordcloud) # to render wordclouds
# default text
demo_text <- ""
# . dashboardPage ----
ui <- dashboardPage(
dashboardHeader(title = "Congratulations!"),
dashboardSidebar(
textAreaInput("theText",
"Enter text here",
value = demo_text,
height = "200px"),
sliderInput("minWord", "Omit words that occur less than", 1, 20, 3),
sliderInput("topWords", "Maximum number of words to include", 1, 100, 100),
sliderInput("maxTextSize", "Maximum text size", 10, 100, 40),
sliderInput("plotHeight", "Aspect Ratio 1:", 0.5, 2, 1, .05),
selectInput("colourScheme", "Colour Scheme",
list(
"Default" = 1,
"Viridis"= 2,
"Magma" = 3,
"Inferno" = 4,
"Plasma" = 5,
"PsyTeachR" = 6
),
selected = 1
),
HTML("Download the <a href='https://github.com/debruine/shiny_apps/tree/master/wordcloud'>app code here<a/>.")
),
dashboardBody(
plotOutput("wordCloudPlot")
),
title = "Congratulations!"
)
# Define server logic ----
server <- function(input, output, session) {
output$wordCloudPlot <- renderPlot({
# process the text
text_table <- tibble(text = input$theText)
# this is better than the next two lines, but requires tidytext,
# which isn't installed on our shiny server
word_table <- unnest_tokens(text_table, "word", "text") %>%
#words <- input$theText %>% tolower() %>% strsplit("[^a-zA-Z]+")
#word_table <- tibble(word = words[[1]]) %>%
count(word) %>%
filter(n >= input$minWord) %>%
arrange(desc(n)) %>%
head(input$topWords)
# create the ggwordcloud
thePlot <- ggplot(word_table, aes(label = word, color = word, size = n)) +
geom_text_wordcloud_area() +
scale_size_area(max_size = input$maxTextSize) +
theme_minimal()
# set the colour scheme
if (input$colourScheme == 2) {
thePlot <- thePlot + scale_colour_viridis_d()
} else if (input$colourScheme == 3) {
thePlot <- thePlot + scale_colour_viridis_d(option = "A")
} else if (input$colourScheme == 4) {
thePlot <- thePlot + scale_colour_viridis_d(option = "B")
} else if (input$colourScheme == 5) {
thePlot <- thePlot + scale_colour_viridis_d(option = "C")
} else if (input$colourScheme == 6) {
ptrc <- c("#983E82","#E2A458","#F5DC70","#59935B","#467AAC","#61589C")
palette <- rep(ptrc, length.out = nrow(word_table))
thePlot <- thePlot + scale_colour_manual(values = palette)
}
thePlot
},
width = "auto",
height = function() {
session$clientData$output_wordCloudPlot_width*input$plotHeight
})
}
shinyApp(ui, server)
|
#This script creates a function to test ridge regression
#
# rrfun: fit a ridge regression (glmnet with alpha = 0) on the training
# rows of `df` and return the share of validation rows whose truncated
# prediction equals the observed avalanche count.
#
#   ti : integer vector of row indices (into the NA-omitted df) used for
#        training; all remaining rows form the validation set.
#   df : data frame with NumberOfAvalanches plus the weather predictors
#        named in the model formula below. Date/Depth/Width/Vertical
#        columns are dropped because they are outcomes, not predictors.
#
# Returns a single numeric in [0, 1]: 1 - misclassification rate.
# NOTE(review): requires glmnet to be attached by the caller, and calls
# set.seed(10) internally, which resets the caller's RNG state.
rrfun <- function(ti, df){
  #Remove all values that cannot be used to predict
  df$Date <- NULL
  df$Depth.mean <- NULL
  df$Width.mean <- NULL
  df$Vertical.mean <- NULL
  # Drop rows with any remaining NA so model.matrix rows align with df rows.
  df <- na.omit(df)
  #Create train and validate sets
  #train.index <- sample(nrow(df), nrow(df)*perTr, replace=FALSE)
  tr <- df[ti,]
  val <- df[-ti,]
  #Construct model
  # model.matrix builds the predictor matrices; [,-1] strips the intercept
  # column, which glmnet adds itself.
  x.tr2 <- model.matrix(NumberOfAvalanches ~ Preci.mean + SnowDepth.mean + Snowfall.mean + Max_Temperature.mean + Min_Temperature.mean + MaxWindSpeed.mean, data = tr)[,-1]
  y.tr2 <- tr$NumberOfAvalanches
  x.val2 <- model.matrix(NumberOfAvalanches ~ Preci.mean + SnowDepth.mean + Snowfall.mean + Max_Temperature.mean + Min_Temperature.mean + MaxWindSpeed.mean, data = val)[,-1]
  y.val2 <- val$NumberOfAvalanches
  #CV to obtain best lambda
  # Fixed seed makes the cv.glmnet fold assignment reproducible (side
  # effect: alters the caller's RNG stream).
  set.seed(10)#
  rr.cv2 <- cv.glmnet(x.tr2, y.tr2, alpha=0)
  #plot(rr.cv2)
  rr.bestlam <- rr.cv2$lambda.min
  # lambda.1se is computed but unused — kept, presumably, for interactive
  # comparison against lambda.min.
  rr.goodlam <- rr.cv2$lambda.1se
  # predict validation set using best lambda and calculate RMSE
  rr.fit2 <- glmnet(x.tr2, y.tr2, alpha = 0)
  #plot(rr.fit2, xvar = "lambda", label=TRUE)
  rr.pred2 <- predict(rr.fit2, s=rr.bestlam, newx = x.val2)
  #sqrt(mean(rr.pred2-y.val2)^2)
  #=0.2433208
  #Check accuracy
  #out[i,1] <- tr[1,1]
  # Accuracy: fraction of validation rows whose prediction, truncated
  # toward zero to an integer count, matches the observed count exactly.
  out <- 1-(mean(trunc(rr.pred2) != val$NumberOfAvalanches))
  #rm(rr.pred2, val, x.tr2, x.val2, perTr, rr.bestlam, rr.cv2,rr.fit2,rr.goodlam,s,train.index, y.tr2,y.val2)
  return(out)
}
|
/rrfun.R
|
no_license
|
justinlboyer/avalanche
|
R
| false
| false
| 1,480
|
r
|
#This script creates a function to test ridge regression
# rrfun: fit ridge regression (glmnet, alpha = 0) on the rows of `df` indexed
# by `ti`, then score truncated predictions against the held-out rows.
#   ti: integer vector of training-row indices
#   df: data frame with NumberOfAvalanches plus the weather predictors
# Returns the validation-set accuracy (fraction of exact count matches).
rrfun <- function(ti, df){
#Remove all values that cannot be used to predict
df$Date <- NULL
df$Depth.mean <- NULL
df$Width.mean <- NULL
df$Vertical.mean <- NULL
df <- na.omit(df)
#Create train and validate sets (ti selects training rows; the rest validate)
#train.index <- sample(nrow(df), nrow(df)*perTr, replace=FALSE)
tr <- df[ti,]
val <- df[-ti,]
#Construct model: design matrices without the intercept column, plus responses
x.tr2 <- model.matrix(NumberOfAvalanches ~ Preci.mean + SnowDepth.mean + Snowfall.mean + Max_Temperature.mean + Min_Temperature.mean + MaxWindSpeed.mean, data = tr)[,-1]
y.tr2 <- tr$NumberOfAvalanches
x.val2 <- model.matrix(NumberOfAvalanches ~ Preci.mean + SnowDepth.mean + Snowfall.mean + Max_Temperature.mean + Min_Temperature.mean + MaxWindSpeed.mean, data = val)[,-1]
y.val2 <- val$NumberOfAvalanches
#CV to obtain best lambda (seeded so the CV folds are reproducible)
set.seed(10)#
rr.cv2 <- cv.glmnet(x.tr2, y.tr2, alpha=0)
#plot(rr.cv2)
rr.bestlam <- rr.cv2$lambda.min
rr.goodlam <- rr.cv2$lambda.1se # computed but unused below
# predict validation set using best lambda and calculate RMSE
rr.fit2 <- glmnet(x.tr2, y.tr2, alpha = 0)
#plot(rr.fit2, xvar = "lambda", label=TRUE)
rr.pred2 <- predict(rr.fit2, s=rr.bestlam, newx = x.val2)
#sqrt(mean(rr.pred2-y.val2)^2)
#=0.2433208
#Check accuracy: fraction of truncated predictions equal to observed counts
#out[i,1] <- tr[1,1]
out <- 1-(mean(trunc(rr.pred2) != val$NumberOfAvalanches))
#rm(rr.pred2, val, x.tr2, x.val2, perTr, rr.bestlam, rr.cv2,rr.fit2,rr.goodlam,s,train.index, y.tr2,y.val2)
return(out)
}
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## (R Cookbook helper; used by plotAlgorithms to aggregate repeated runs.)
##   dataframe: a data frame.
##   measurevar: the name of a column that contains the variable to be summarized
##   groupvars: a vector containing names of columns that contain grouping variables
##   na.rm: a boolean that indicates whether to ignore NA's
##   conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(dataframe=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
require(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(dataframe, groupvars, .drop=.drop,
               .fun = function(xx, col) {
                 c(N = length2(xx[[col]], na.rm=na.rm),
                   mean = mean (xx[[col]], na.rm=na.rm),
                   sd = sd (xx[[col]], na.rm=na.rm)
                 )
               },
               measurevar
)
# pfelt modification: all instances with N=1 will have NAs in the sd
# (left as NA; the commented-out lines below were an abandoned workaround)
# datac <- datac[which(datac$N>1),] # remove rows
# datac[which(datac$N==1),]$sd <- 1 # set sd to 0
# datac[which(datac$N==1),]$se <- 1
# datac[which(datac$N==1),]$ci <- 1
# Rename the "mean" column to the measured variable's name
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
# Row predicate: TRUE where the run used optimization (MAP/EM-style
# "maximize" training) rather than sampling.
isOptimization <- function(dat) {
  grepl("maximize", dat$training, fixed = TRUE)
}
# Build a row predicate: TRUE where the hyperparam_training field matches
# the given tuning method (regex match via grepl).
hasHyperTuning <- function(tuningMethod) {
  function(dat) {
    grepl(tuningMethod, dat$hyperparam_training)
  }
}
# Row predicate: TRUE where basedir names any document-vector variant
# (LDA, word2vec, or doc2vec).
usesVectors <- function(dat) {
  grepl("-lda|-w2v|-d2v", dat$basedir)
}
# Row predicate: TRUE where basedir names an LDA-vector variant.
usesLdaVectors <- function(dat) {
  grepl("-lda", dat$basedir, fixed = TRUE)
}
# Row predicate: TRUE where basedir names a word2vec-vector variant.
usesWord2VecVectors <- function(dat) {
  grepl("-w2v", dat$basedir, fixed = TRUE)
}
# Row predicate: TRUE where basedir names a doc2vec-vector variant.
usesDoc2VecVectors <- function(dat) {
  grepl("-d2v", dat$basedir, fixed = TRUE)
}
# Build a row predicate that matches rows with the given labeling strategy.
# A NULL strategy matches every row (each row is compared against itself).
isLabelingStrategy <- function(labeling_strategy) {
  function(dat) {
    if (is.null(labeling_strategy)) {
      dat$labeling_strategy == dat$labeling_strategy
    } else {
      dat$labeling_strategy == labeling_strategy
    }
  }
}
# Negate a row predicate: the returned predicate is TRUE exactly where f
# is FALSE.
not <- function(f) {
  function(dat) !f(dat)
}
# Combine row predicates with logical AND. The returned predicate applies
# every supplied predicate to dat and intersects the resulting masks; with
# no predicates it matches every row.
and <- function(...) {
  criteria <- list(...)
  function(dat) {
    masks <- lapply(criteria, function(criterion) criterion(dat))
    Reduce(`&`, masks, rep(TRUE, nrow(dat)))
  }
}
# Label matching rows: set dat$algorithm to `name` wherever criterion(dat)
# is TRUE, leaving all other rows untouched. Returns the modified frame.
nameRows <- function(dat, name, criterion) {
  hits <- which(criterion(dat))
  dat$algorithm[hits] <- name
  dat
}
# Normalize a raw experiment-results data frame for plotting.
#
# Steps:
#   1. Coerce accuracy columns (often read in as factors/strings) to numeric.
#   2. Backfill columns that older result files lack: dataset_type,
#      truncate_unannotated_data, basedir.
#   3. Derive an 'algorithm' factor from the combination of labeling_strategy,
#      training mode, and the vector variant encoded in basedir; rows matching
#      no known combination stay "invalid".
#   4. Derive a 'corpus' factor from basedir / dataset.
#   5. Add presentation helpers: d (= "d = <k>"), num_annotators and
#      eta_variance as factors, a tuning label.
#
# dat: data frame as produced by read.csv on a results export.
# Returns the augmented data frame; prints valid/invalid row counts.
massageData <- function(dat){
  # Accuracy columns may arrive as factors; round-trip through character.
  dat$labeled_acc <- as.numeric(as.character(dat$labeled_acc))
  dat$heldout_acc <- as.numeric(as.character(dat$heldout_acc))
  dat$top3_labeled_acc <- as.numeric(as.character(dat$top3_labeled_acc))
  dat$top3_heldout_acc <- as.numeric(as.character(dat$top3_heldout_acc))
  # Older files used 'corpus' for what is now called 'dataset_type'.
  if (is.null(dat$dataset_type)){
    dat$dataset_type <- dat$corpus
    dat$corpus <- NULL
  }
  num_rows <- dim(dat)[1]
  # Add truncate_unannotated_data='false' if the column doesn't exist.
  dat$truncate_unannotated_data <- if (is.null(dat$truncate_unannotated_data)) rep('false',num_rows) else dat$truncate_unannotated_data
  # Backfill basedir from dataset_source where basedir is missing.
  dat$basedir <- as.character(dat$basedir)
  emptyrows <- which(is.na(dat$basedir))
  dat$basedir[emptyrows] <- as.character(dat$dataset_source[emptyrows])
  dat$basedir <- as.factor(dat$basedir)
  ########## derive 'algorithm' factor ##################
  dat$algorithm <- rep("invalid",num_rows)
  # baselines
  dat <- nameRows(dat, 'baseline', and(isLabelingStrategy('UBASELINE')))
  # itemresp variants (sampling / maximization / variational, +vectors)
  dat <- nameRows(dat, 'itemresp_s', and(isLabelingStrategy('ITEMRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'itemresp_m', and(isLabelingStrategy('ITEMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varitemresp', and(isLabelingStrategy('VARITEMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varitemresp_w2v', and(isLabelingStrategy('VARITEMRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varitemresp_d2v', and(isLabelingStrategy('VARITEMRESP'), isOptimization, usesDoc2VecVectors))
  # momresp variants
  dat <- nameRows(dat, 'momresp_s', and(isLabelingStrategy('MOMRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'momresp_m', and(isLabelingStrategy('MOMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmomresp', and(isLabelingStrategy('VARMOMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmomresp_w2v', and(isLabelingStrategy('VARMOMRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varmomresp_d2v', and(isLabelingStrategy('VARMOMRESP'), isOptimization, usesDoc2VecVectors))
  # multiresp variants
  dat <- nameRows(dat, 'multiresp_s', and(isLabelingStrategy('MULTIRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'multiresp_m', and(isLabelingStrategy('MULTIRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmultiresp', and(isLabelingStrategy('VARMULTIRESP'), isOptimization, not(usesVectors)))
  # logresp variants
  dat <- nameRows(dat, 'logresp_st', and(isLabelingStrategy('LOGRESP_ST'), isOptimization, not(usesVectors))) # self training
  dat <- nameRows(dat, 'logresp_m', and(isLabelingStrategy('LOGRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varlogresp', and(isLabelingStrategy('VARLOGRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varlogresp_w2v', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varlogresp_d2v', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesDoc2VecVectors))
  dat <- nameRows(dat, 'varlogresp_lda', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesLdaVectors))
  # cslda
  dat <- nameRows(dat, 'cslda_s', and(isLabelingStrategy('CSLDA'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'cslda', and(isLabelingStrategy('CSLDA'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'cslda_lex_s', and(isLabelingStrategy('CSLDALEX'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'cslda_lex', and(isLabelingStrategy('CSLDALEX'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'cslda_p', and(isLabelingStrategy('CSLDAP'), isOptimization, not(usesVectors))) # pipelined
  dat <- nameRows(dat, 'cslda_s_p', and(isLabelingStrategy('CSLDAP'), not(isOptimization))) # pipelined w sampler
  # fully discriminative
  dat <- nameRows(dat, 'discrim', and(isLabelingStrategy('DISCRIM'), isOptimization, not(usesLdaVectors)))
  dat <- nameRows(dat, 'discrim_lda', and(isLabelingStrategy('DISCRIM'), isOptimization, usesLdaVectors))
  dat <- nameRows(dat, 'discrim_d2v', and(isLabelingStrategy('DISCRIM'), isOptimization, usesDoc2VecVectors))
  dat <- nameRows(dat, 'discrim_w2v', and(isLabelingStrategy('DISCRIM'), isOptimization, usesWord2VecVectors))
  # make 'algorithm' into factor
  dat$algorithm <- factor(dat$algorithm)
  ########## derive 'corpus' factor ##################
  dat$corpus <- rep("OTHER",num_rows)
  # combine weather,weather-w2v,weather-d2v
  if (any(grepl("weather",dat$basedir))){
    dat[which(grepl("weather",dat$basedir)),]$corpus <- "WEATHER"
  }
  # combine newsgroups,newsgroups-w2v,newsgroups-d2v
  if (any(grepl("newsgroups",dat$basedir))){
    dat[which(grepl("newsgroups",dat$basedir)),]$corpus <- "NEWSGROUPS"
  }
  # combine cfgroups1000,cfgroups1000-w2v,cfgroups1000-d2v
  if (any(grepl("cfgroups1000",dat$basedir))){
    dat[which(grepl("cfgroups1000",dat$basedir)),]$corpus <- "CFGROUPS1000"
  }
  # combine twitterparaphrase,twitterparaphrase-w2v,twitterparaphrase-d2v
  if (any(grepl("twitterparaphrase",dat$basedir))){
    dat[which(grepl("twitterparaphrase",dat$basedir)),]$corpus <- "TWITTER_PARA"
  }
  # combine twittersentiment,twittersentiment-w2v,twittersentiment-d2v
  if (any(grepl("twittersentiment",dat$basedir))){
    dat[which(grepl("twittersentiment",dat$basedir)),]$corpus <- "TWITTER_SENT"
  }
  # combine compatibility experiments
  # BUG FIX: the guard previously tested for "twittersentiment" (copy/paste
  # error), so compatibility rows were only labeled when twittersentiment
  # rows also happened to be present in the same file.
  if (any(grepl("compatibility",dat$basedir))){
    dat[which(grepl("compatibility",dat$basedir)),]$corpus <- "COMPATIBILITY"
  }
  # treat simplified cfgroups as its own corpus
  dat$corpus[which(dat$dataset=="cfsimplegroups1000a.json")] <- "CFSIMPLEGROUPS"
  # make 'corpus' into factor
  dat$corpus <- factor(dat$corpus)
  ########## miscellaneous ##################
  # make num_annotators into a factor (so it can be used as a plotting facet)
  if (!is.null(dat$num_annotators)){
    dat$num_annotators <- factor(dat$num_annotators)
  }
  # rename k to 'd' (presentation label) and fix the facet ordering
  require(plyr)
  dat$d <- sprintf("d = %g",dat$k)
  dat$d <- factor(dat$d, levels = c('d = 1','d = 2','d = 3','d = 5','d = 10'))
  # prettify factor names
  if (!is.null(dat$inline_hyperparam_tuning)){
    dat$tuning <- sprintf("Hyperparam Tuning = %s",dat$inline_hyperparam_tuning)
  }
  # eta variance -> factor
  if (!is.null(dat$eta_variance)){
    dat$eta_variance <- factor(dat$eta_variance)
  }
  # report rows that weren't matched to any known algorithm combination
  valid_rows <- which(dat$algorithm!='invalid')
  print(c("total rows:",num_rows))
  print(c("valid rows:",length(valid_rows)))
  print(c("invalid rows:",length(which(dat$algorithm=='invalid'))))
  return(dat)
}
# Draw a faceted learning-curve plot: one line per algorithm with
# mean +/- sd error bars, aggregated over experiment repetitions.
#
# dat:        massaged results data frame (see massageData).
# yvarname:   column to plot on the y axis (e.g. "labeled_acc").
# title:      plot title ("" for none).
# xvarname:   column to plot on the x axis.
# ymin/ymax:  y-axis limits (default to the data range of yvarname).
# ylabel:     y-axis label.
# xlabel:     sprintf template for the x-axis label; receives the
#             pretty-printed divisor.
# shapesize:  point size for the per-algorithm shape markers.
# xlim:       optional x-axis limits (applied via scale_x_continuous).
# divisor:    x values are divided by this (NULL to disable rescaling).
# hide_legend: suppress the legend entirely.
# algorithm_colors/algorithm_shapes: optional named manual scales keyed by
#             algorithm name.
# facets:     facet_grid formula string; its terms are also used as grouping
#             variables when computing the mean/sd per point.
# other_ggplot_elements: optional list of extra ggplot layers to add.
# xbreaks:    optional explicit x-axis breaks (scaled by divisor).
#
# Returns the ggplot object.
plotAlgorithms <- function(dat, yvarname, title, xvarname="num_documents_with_annotations", ymin=min(dat[[yvarname]]), ymax=max(dat[[yvarname]]), ylabel="Accuracy", xlabel="Number of annotated instances x %s",
                           shapesize=1, xlim=NULL, divisor=1000, hide_legend=FALSE, algorithm_colors=NULL, algorithm_shapes=NULL, facets="~corpus~num_annotators~annotator_accuracy",
                           other_ggplot_elements=NULL, xbreaks=NULL){
  # x tick label formatter: print breaks as plain numbers (strip ".0").
  xformatter <- function(x){
    gsub('\\.0','',format(x))
  }
  if (is.null(dat$num_documents_with_annotations)){
    # BUG FIX: this previously divided by the global `d$k`; use this data
    # frame's own k column so the derived count matches the data being plotted.
    dat$num_documents_with_annotations <- round(dat$num_annotations / dat$k)
  }
  # Variables to group by when computing mean/sd across repetitions:
  # the facet terms plus the x variable and the line identity (algorithm).
  groupvars <- strsplit(facets,'~')[[1]][]
  groupvars <- c(groupvars, xvarname, "algorithm")
  groupvars <- groupvars[lapply(groupvars,nchar)>0] # drop empty terms
  dfc <- summarySE(dat, measurevar=yvarname, groupvars=groupvars)
  # Rescale the x axis (and any explicit breaks) by the divisor.
  if (!is.null(divisor)){
    dfc[[xvarname]] <- dfc[[xvarname]]/divisor
    if (!is.null(xbreaks)){
      xbreaks <- xbreaks/divisor
    }
  }
  # base plot: error bars, lines, and black shape markers per algorithm
  plt <- ggplot(dat=dfc, aes_string(x=xvarname, y=yvarname, color="algorithm", group="algorithm")) +
    ggtitle(title) +
    geom_errorbar(aes_string(ymin=sprintf("%s-sd",yvarname), ymax=sprintf("%s+sd",yvarname))) +
    geom_line(size=0.8) +
    geom_point(aes(shape=algorithm),size=shapesize,color='black') +
    ylim(ymin,ymax) +
    ylab(ylabel) +
    xlab(sprintf(xlabel,format(divisor,big.mark=',',big.interval=3))) +
    scale_x_continuous(labels=xformatter) +
    theme(plot.title = element_text(lineheight=1.8,face='bold')) +
    theme_bw()
  # optional manual line colors keyed by algorithm
  if (!is.null(algorithm_colors)){
    plt <- plt + scale_colour_manual(values=algorithm_colors)
  }
  # optional explicit x breaks (NOTE: this replaces the base x scale, so
  # ggplot2 will warn about the duplicate scale_x_continuous)
  if (!is.null(xbreaks)){
    plt <- plt + scale_x_continuous(labels=xformatter, breaks = xbreaks)
  }
  # optional manual point shapes keyed by algorithm
  if (!is.null(algorithm_shapes)){
    plt <- plt + scale_shape_manual(values=algorithm_shapes)
  }
  # facets
  if (nchar(facets)>0){
    plt <- plt + facet_grid(facets)
  }
  # hide legend
  if (hide_legend){
    plt <- plt + theme(legend.position='none')
  }
  # optional x limits (also replaces the x scale; see note above)
  if (!is.null(xlim)){
    plt <- plt + scale_x_continuous(limits=xlim,labels=xformatter)
  }
  # any extra caller-supplied ggplot elements
  if (!is.null(other_ggplot_elements)){
    for (el in other_ggplot_elements){
      plt <- plt + el
    }
  }
  return(plt)
}
# setup paths and packages
#install.packages("ggplot2")
require(ggplot2)
# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd('/aml/home/plf1/git/Experiments/plf1/TACL-2015-Vector-submission/csv')
# stop execution --- proceed manually
stop()
# experiments with:
# newsgroups, cfnewsgroups, weather, twittersentiment, twitterparaphrase, compatibility
data = read.csv("2015-06-25-taacl.csv")
#########################################################
# Prototyping
#########################################################
# Interactive scratchpad: run ONE of the `d <- ...` lines below to select a
# slice of the results, then run individual plot calls further down.
mdata <- massageData(data); d <- mdata
# choose a dataset
d <- mdata; d = mdata[which(mdata$corpus=="NEWSGROUPS"),]
d <- mdata; d = mdata[which(grepl("cfgroups",mdata$dataset)),]
d <- mdata; d = mdata[which(mdata$corpus=="NG"),]
d <- mdata; d = mdata[which(mdata$corpus=="DREDZE"),]
d <- mdata; d = mdata[which(mdata$corpus=="R8"),]
d <- mdata; d = mdata[which(mdata$corpus=="R52"),]
# ...or choose a subset of algorithms
d <- mdata; d = d[which(d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("w2v",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("cslda",d$algorithm)),]
d <- mdata; d = d[which(grepl("d2v",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("lda",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("discrim",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="varlogresp" | d$algorithm=="logresp_m" | d$algorithm=="baseline"),]
d <- mdata; d = d[which(grepl("logresp",d$algorithm)),]
d <- mdata; d = d[which(grepl("discrim",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="logresp"),]
d <- mdata; d = d[which(d$algorithm=="cslda_s" | grepl("w2v",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="varlogresp_w2v" | d$algorithm=="varlogresp_d2v" | d$algorithm=="cslda_s" | d$algorithm=="baseline" | d$algorithm=="varlogresp"),]
# common plotting parameters for the diagnostic plots below
facets <- "~annotator_accuracy~corpus~vary_annotator_rates"
xvarname <- "num_annotations"
# Diagnostic plots of the currently selected slice `d` (run individually).
plotAlgorithms(d,"labeled_acc","Inferred Label Accuracy",ymin=0.,ymax=1,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"unlabeled_acc","Unlabeled Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"heldout_acc","Test Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"log_joint","Inferred Label Accuracy",ymin=min(d$log_joint),ymax=max(d$log_joint),facets=facets,xvarname=xvarname)
plotAlgorithms(d,"overall_acc","Overall Accuracy",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"btheta","BTheta",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"bgamma","BGamma",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"cgamma","CGamma",ymin=0,ymax=50,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"bphi","BPhi",ymin=0,ymax=2,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"top3_labeled_acc","Top 3 Labeled Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"annacc_rmse","Annotator RMSE",ymin=0,ylabel="Annotator RMSE",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"annacc_mat_rmse","Annotator Matrix RMSE",ymin=0,ymax=.2,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"num_annotations","Inferred Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"num_documents_with_annotations","Docs with Annotations",ymin=0,facets=facets,xvarname=xvarname)
plot(d$log_joint, d$labeled_acc)
# machine-accuracy plots only make sense for model-based algorithms
j = d[which(d$algorithm!='itemresp' & d$algorithm!='momresp'),]
plotAlgorithms(j,"machacc_rmse","Machine RMSE",ymin=0)
plotAlgorithms(j,"machacc_mat_rmse","Machine MAT RMSE")
#############################################################################
# Enabling Crowdsourcing with document representations 2015
#############################################################################
# Final figure generation for the paper. Each section below is self-contained:
# it re-reads the CSV, selects and renames the algorithms to show, fixes the
# legend order/colors/shapes, then plots one corpus and saves an EPS figure.
height = 9
######################### newsgroups ###############################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.55
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data);
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="varmomresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="NEWSGROUPS"),])
ggsave("../images/newsgroups.eps",width=width,height=height,units='cm')
######################### cfgroups1000 ###############################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.0
ymax = 0.75
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data);
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="varmomresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus~dataset", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="CFGROUPS1000"),])
ggsave("../images/cfgroups1000.eps",width=width,height=height,units='cm')
##################################### general tweet sentiment ###################################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.6
ymax = 0.8
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# (alternative algorithm-family filters kept for reference)
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="varmomresp" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="TWITTER_SENT"),])
ggsave("../images/twittersentiment.eps",width=width,height=height,units='cm')
######################################## weather tweet sentiment #######################################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.55
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# (alternative algorithm-family filters kept for reference)
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="varmomresp" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="WEATHER"),])
ggsave("../images/weather.eps",width=width,height=height,units='cm')
######################################## compatibility #######################################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.93
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data); mdata$algorithm[which(mdata$algorithm=="varitemresp_w2v")] <- "varitemresp" # strip useless itemresp_w2v
# (alternative algorithm-family filters kept for reference)
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$num_annotations>5000),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="COMPATIBILITY"),])
ggsave("../images/compatibility.eps",width=width,height=height,units='cm')
############################################# tweet paraphrase dataset ##############################################################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.75
ymax = 0.95
shapesize = 3
xvarname = "num_annotations"
# data
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# (alternative algorithm-family filters kept for reference)
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="TWITTER_PARA"),])
ggsave("../images/twitterparaphrase.eps",width=width,height=height,units='cm')
|
/plf1/TACL-2015-Vector-submission/learningcurves.R
|
no_license
|
BYU-NLP-Lab/Experiments
|
R
| false
| false
| 28,329
|
r
|
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## (R Cookbook helper; duplicate copy of summarySE above.)
##   dataframe: a data frame.
##   measurevar: the name of a column that contains the variable to be summarized
##   groupvars: a vector containing names of columns that contain grouping variables
##   na.rm: a boolean that indicates whether to ignore NA's
##   conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(dataframe=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
require(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(dataframe, groupvars, .drop=.drop,
               .fun = function(xx, col) {
                 c(N = length2(xx[[col]], na.rm=na.rm),
                   mean = mean (xx[[col]], na.rm=na.rm),
                   sd = sd (xx[[col]], na.rm=na.rm)
                 )
               },
               measurevar
)
# pfelt modification: all instances with N=1 will have NAs in the sd
# (left as NA; the commented-out lines below were an abandoned workaround)
# datac <- datac[which(datac$N>1),] # remove rows
# datac[which(datac$N==1),]$sd <- 1 # set sd to 0
# datac[which(datac$N==1),]$se <- 1
# datac[which(datac$N==1),]$ci <- 1
# Rename the "mean" column to the measured variable's name
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
# Row predicate: TRUE where the 'training' field contains "maximize"
# (the run used optimization rather than sampling). Duplicate copy.
isOptimization <- function(dat){
return(grepl("maximize", dat$training))
}
# Build a row predicate: TRUE where 'hyperparam_training' matches the given
# tuning method (regex). Duplicate copy.
hasHyperTuning <- function(tuningMethod){
function(dat){
return(grepl(tuningMethod, dat$hyperparam_training))
}
}
# Row predicate: TRUE where basedir names any document-vector variant
# (LDA, word2vec, or doc2vec). Duplicate copy.
usesVectors <- function(dat){
grepl(".*(-lda|-w2v|-d2v).*",dat$basedir)
}
# Row predicate: TRUE where basedir names an LDA-vector variant. Duplicate copy.
usesLdaVectors <- function(dat){
grepl(".*-lda.*",dat$basedir)
}
# Row predicate: TRUE where basedir names a word2vec-vector variant. Duplicate copy.
usesWord2VecVectors <- function(dat){
grepl(".*-w2v.*",dat$basedir)
}
# Row predicate: TRUE where basedir names a doc2vec-vector variant. Duplicate copy.
usesDoc2VecVectors <- function(dat){
grepl(".*-d2v.*",dat$basedir)
}
# Build a row predicate matching rows with the given labeling strategy.
# A NULL strategy matches every row (each row compared against itself).
# Duplicate copy.
isLabelingStrategy <- function(labeling_strategy){
function(dat){
desiredLabelingStrategy <- if(is.null(labeling_strategy)) dat$labeling_strategy else labeling_strategy
return(dat$labeling_strategy==desiredLabelingStrategy)
}
}
# Negate a row predicate: TRUE exactly where f is FALSE. Duplicate copy.
not <- function(f){
function(dat){
return(!f(dat))
}
}
# Combine row predicates with logical AND; with no predicates it matches
# every row. Duplicate copy.
and <- function(...){
function(dat){
criteria <- list(...)
# start with everything matched
matchedRows <- rep(TRUE,dim(dat)[1])
# intersection of rows that match each criterion
for (criterion in criteria){
matchedRows <- matchedRows & criterion(dat)
}
return(matchedRows)
}
}
# Set dat$algorithm to `name` wherever criterion(dat) is TRUE; other rows
# are untouched. Returns the modified frame. Duplicate copy.
nameRows <- function(dat,name,criterion){
matchedRows <- criterion(dat)
# add algorithm name to matched rows
dat$algorithm[which(matchedRows)] <- name
return(dat)
}
# Normalise a raw experiment-results data frame into a plotting-ready form:
#   * coerces the accuracy columns to numeric,
#   * back-fills columns that older result files lack,
#   * derives an 'algorithm' factor from labeling_strategy/training/basedir
#     (rows not claimed by any rule stay "invalid"),
#   * derives a 'corpus' factor from basedir,
#   * adds cosmetic factors used as plot facets ('d', 'tuning', ...).
# Columns read: labeled_acc, heldout_acc, top3_labeled_acc, top3_heldout_acc,
# dataset_type/corpus, truncate_unannotated_data, basedir, dataset_source,
# labeling_strategy, training, k, dataset, and optionally num_annotators,
# inline_hyperparam_tuning, eta_variance.
massageData <- function(dat){
  # accuracy columns may arrive as factors; round-trip through character
  dat$labeled_acc <- as.numeric(as.character(dat$labeled_acc))
  dat$heldout_acc <- as.numeric(as.character(dat$heldout_acc))
  dat$top3_labeled_acc <- as.numeric(as.character(dat$top3_labeled_acc))
  dat$top3_heldout_acc <- as.numeric(as.character(dat$top3_heldout_acc))
  # older result files used 'corpus' where newer ones use 'dataset_type';
  # rename so 'corpus' can be re-derived below
  if (is.null(dat$dataset_type)){
    dat$dataset_type <- dat$corpus
    dat$corpus <- NULL
  }
  num_rows <- dim(dat)[1]
  # add truncate_unannotated_data=false if it doesn't exist
  dat$truncate_unannotated_data <- if (is.null(dat$truncate_unannotated_data)) rep('false',num_rows) else dat$truncate_unannotated_data
  # fall back to dataset_source where basedir is missing
  dat$basedir <- as.character(dat$basedir)
  emptyrows <- which(is.na(dat$basedir))
  dat$basedir[emptyrows] <- as.character(dat$dataset_source[emptyrows])
  dat$basedir <- as.factor(dat$basedir)
  ########## derive 'algorithm' factor ##################
  dat$algorithm <- rep("invalid",num_rows)
  # baselines
  dat <- nameRows(dat, 'baseline', and(isLabelingStrategy('UBASELINE')))
  # itemresp variants
  dat <- nameRows(dat, 'itemresp_s', and(isLabelingStrategy('ITEMRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'itemresp_m', and(isLabelingStrategy('ITEMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varitemresp', and(isLabelingStrategy('VARITEMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varitemresp_w2v', and(isLabelingStrategy('VARITEMRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varitemresp_d2v', and(isLabelingStrategy('VARITEMRESP'), isOptimization, usesDoc2VecVectors))
  # momresp variants
  dat <- nameRows(dat, 'momresp_s', and(isLabelingStrategy('MOMRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'momresp_m', and(isLabelingStrategy('MOMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmomresp', and(isLabelingStrategy('VARMOMRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmomresp_w2v', and(isLabelingStrategy('VARMOMRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varmomresp_d2v', and(isLabelingStrategy('VARMOMRESP'), isOptimization, usesDoc2VecVectors))
  # multiresp variants
  dat <- nameRows(dat, 'multiresp_s', and(isLabelingStrategy('MULTIRESP'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'multiresp_m', and(isLabelingStrategy('MULTIRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varmultiresp', and(isLabelingStrategy('VARMULTIRESP'), isOptimization, not(usesVectors)))
  # logresp variants
  dat <- nameRows(dat, 'logresp_st', and(isLabelingStrategy('LOGRESP_ST'), isOptimization, not(usesVectors))) # self training
  dat <- nameRows(dat, 'logresp_m', and(isLabelingStrategy('LOGRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varlogresp', and(isLabelingStrategy('VARLOGRESP'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'varlogresp_w2v', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesWord2VecVectors))
  dat <- nameRows(dat, 'varlogresp_d2v', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesDoc2VecVectors))
  dat <- nameRows(dat, 'varlogresp_lda', and(isLabelingStrategy('VARLOGRESP'), isOptimization, usesLdaVectors))
  # cslda
  dat <- nameRows(dat, 'cslda_s', and(isLabelingStrategy('CSLDA'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'cslda', and(isLabelingStrategy('CSLDA'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'cslda_lex_s', and(isLabelingStrategy('CSLDALEX'), not(isOptimization), not(usesVectors)))
  dat <- nameRows(dat, 'cslda_lex', and(isLabelingStrategy('CSLDALEX'), isOptimization, not(usesVectors)))
  dat <- nameRows(dat, 'cslda_p', and(isLabelingStrategy('CSLDAP'), isOptimization, not(usesVectors))) # pipelined
  dat <- nameRows(dat, 'cslda_s_p', and(isLabelingStrategy('CSLDAP'), not(isOptimization))) # pipelined w sampler
  # fully discriminative
  dat <- nameRows(dat, 'discrim', and(isLabelingStrategy('DISCRIM'), isOptimization, not(usesLdaVectors)))
  dat <- nameRows(dat, 'discrim_lda', and(isLabelingStrategy('DISCRIM'), isOptimization, usesLdaVectors))
  dat <- nameRows(dat, 'discrim_d2v', and(isLabelingStrategy('DISCRIM'), isOptimization, usesDoc2VecVectors))
  dat <- nameRows(dat, 'discrim_w2v', and(isLabelingStrategy('DISCRIM'), isOptimization, usesWord2VecVectors))
  # make 'algorithm' into factor
  dat$algorithm <- factor(dat$algorithm)
  ########## derive 'corpus' factor ##################
  dat$corpus <- rep("OTHER",num_rows)
  # combine weather,weather-w2v,weather-d2v
  if (any(grepl("weather",dat$basedir))){
    dat[which(grepl("weather",dat$basedir)),]$corpus <- "WEATHER"
  }
  # combine newsgroups,newsgroups-w2v,newsgroups-d2v
  if (any(grepl("newsgroups",dat$basedir))){
    dat[which(grepl("newsgroups",dat$basedir)),]$corpus <- "NEWSGROUPS"
  }
  # combine cfgroups1000,cfgroups1000-w2v,cfgroups1000-d2v
  if (any(grepl("cfgroups1000",dat$basedir))){
    dat[which(grepl("cfgroups1000",dat$basedir)),]$corpus <- "CFGROUPS1000"
  }
  # combine twitterparaphrase,twitterparaphrase-w2v,twitterparaphrase-d2v
  if (any(grepl("twitterparaphrase",dat$basedir))){
    dat[which(grepl("twitterparaphrase",dat$basedir)),]$corpus <- "TWITTER_PARA"
  }
  # combine twittersentiment,twittersentiment-w2v,twittersentiment-d2v
  if (any(grepl("twittersentiment",dat$basedir))){
    dat[which(grepl("twittersentiment",dat$basedir)),]$corpus <- "TWITTER_SENT"
  }
  # combine compatibility experiments
  # BUG FIX: this guard previously tested grepl("twittersentiment", ...)
  # (copy-paste error), so COMPATIBILITY rows were only labeled when
  # twittersentiment runs happened to be present in the same file.
  if (any(grepl("compatibility",dat$basedir))){
    dat[which(grepl("compatibility",dat$basedir)),]$corpus <- "COMPATIBILITY"
  }
  # treat simplified cfgroups as its own corpus
  dat$corpus[which(dat$dataset=="cfsimplegroups1000a.json")] <- "CFSIMPLEGROUPS"
  # make 'corpus' into factor
  dat$corpus <- factor(dat$corpus)
  ########## miscellaneous ##################
  # num_annotators as a factor (so it can be used as a plotting facet)
  if (!is.null(dat$num_annotators)){
    dat$num_annotators <- factor(dat$num_annotators)
  }
  # rename k to 'd' and fix the facet display order
  require(plyr)
  dat$d <- sprintf("d = %g",dat$k)
  dat$d <- factor(dat$d, levels = c('d = 1','d = 2','d = 3','d = 5','d = 10'))
  # prettify factor names
  if (!is.null(dat$inline_hyperparam_tuning)){
    dat$tuning <- sprintf("Hyperparam Tuning = %s",dat$inline_hyperparam_tuning)
  }
  # eta variance -> factor
  if (!is.null(dat$eta_variance)){
    dat$eta_variance <- factor(dat$eta_variance)
  }
  # report rows that weren't claimed by any algorithm rule above
  valid_rows <- which(dat$algorithm!='invalid')
  print(c("total rows:",num_rows))
  print(c("valid rows:",length(valid_rows)))
  print(c("invalid rows:",length(which(dat$algorithm=='invalid'))))
  return(dat)
}
# Plot the per-group mean of 'yvarname' (one line per algorithm) against
# 'xvarname', with +/- sd error bars, faceted by the 'facets' formula.
# Relies on summarySE() (defined above) for group means/sds and on ggplot2.
# Returns the ggplot object; callers print or ggsave() it.
#
# Arguments of note:
#   divisor  - display-only rescaling of the x axis (NULL disables it)
#   xbreaks  - explicit x tick positions, given on the pre-division scale
#   algorithm_colors / algorithm_shapes - named vectors keyed by algorithm
#   other_ggplot_elements - list of extra ggplot layers appended verbatim
plotAlgorithms <- function(dat, yvarname, title, xvarname="num_documents_with_annotations", ymin=min(dat[[yvarname]]), ymax=max(dat[[yvarname]]), ylabel="Accuracy", xlabel="Number of annotated instances x %s",
                           shapesize=1, xlim=NULL, divisor=1000, hide_legend=FALSE, algorithm_colors=NULL, algorithm_shapes=NULL, facets="~corpus~num_annotators~annotator_accuracy",
                           other_ggplot_elements=NULL, xbreaks=NULL){
  # x tick label formatter: strip a trailing ".0" so whole numbers print bare
  xformatter <- function(x){
    gsub('\\.0','',format(x))
  }
  if (is.null(dat$num_documents_with_annotations)){
    # BUG FIX: this previously divided by 'd$k' -- 'd' being a global data
    # frame left over from interactive use -- instead of this function's 'dat'.
    dat$num_documents_with_annotations <- round(dat$num_annotations / dat$k)
  }
  # variables to group by when computing means/sds: the facet variables plus
  # the x axis and the line identity (algorithm)
  groupvars <- strsplit(facets,'~')[[1]]
  groupvars <- c(groupvars, xvarname, "algorithm")
  groupvars <- groupvars[nchar(groupvars) > 0] # drop the empty entry from the leading '~'
  dfc <- summarySE(dat, measurevar=yvarname, groupvars=groupvars)
  if (!is.null(divisor)){
    # rescale the x axis for display; keep any explicit breaks consistent
    dfc[[xvarname]] <- dfc[[xvarname]]/divisor
    if (!is.null(xbreaks)){
      xbreaks <- xbreaks/divisor
    }
  }
  # base plot: one line per algorithm with +/- sd error bars
  plt <- ggplot(dat=dfc, aes_string(x=xvarname, y=yvarname, color="algorithm", group="algorithm")) +
    ggtitle(title) +
    geom_errorbar(aes_string(ymin=sprintf("%s-sd",yvarname), ymax=sprintf("%s+sd",yvarname))) +
    geom_line(size=0.8) +
    geom_point(aes(shape=algorithm),size=shapesize,color='black') +
    ylim(ymin,ymax) +
    ylab(ylabel) +
    xlab(sprintf(xlabel,format(divisor,big.mark=',',big.interval=3))) +
    scale_x_continuous(labels=xformatter) +
    theme(plot.title = element_text(lineheight=1.8,face='bold')) +
    theme_bw()
  # line colors
  if (!is.null(algorithm_colors)){
    plt <- plt + scale_colour_manual(values=algorithm_colors)
  }
  # explicit x breaks (NOTE: replaces the x scale added above; ggplot warns)
  if (!is.null(xbreaks)){
    plt <- plt + scale_x_continuous(labels=xformatter, breaks = xbreaks)
  }
  # point shapes
  if (!is.null(algorithm_shapes)){
    plt <- plt + scale_shape_manual(values=algorithm_shapes)
  }
  # facets
  if (nchar(facets)>0){
    plt <- plt + facet_grid(facets)
  }
  # hide legend
  if (hide_legend){
    plt <- plt + theme(legend.position='none')
  }
  # x limits (NOTE: also replaces the x scale, so breaks set above are lost)
  if (!is.null(xlim)){
    plt <- plt + scale_x_continuous(limits=xlim,labels=xformatter)
  }
  # caller-supplied extra layers
  if (!is.null(other_ggplot_elements)){
    for (el in other_ggplot_elements){
      plt <- plt + el
    }
  }
  return(plt)
}
# ---------------------------------------------------------------------------
# Interactive driver script. Everything after stop() is meant to be run
# line-by-line in a REPL, not sourced top-to-bottom.
# ---------------------------------------------------------------------------
# setup paths and packages
#install.packages("ggplot2")
require(ggplot2)
# NOTE(review): machine-specific absolute path; update before running elsewhere
setwd('/aml/home/plf1/git/Experiments/plf1/TACL-2015-Vector-submission/csv')
# stop execution --- proceed manually
stop()
# experiments with:
# newsgroups, cfnewsgroups, weather, twittersentiment, twitterparaphrase, compatibility
data = read.csv("2015-06-25-taacl.csv")
#########################################################
# Prototyping
#########################################################
mdata <- massageData(data); d <- mdata
# choose a dataset: each line below is an ALTERNATIVE subset of the results;
# run exactly one by hand before the plotting calls further down
d <- mdata; d = mdata[which(mdata$corpus=="NEWSGROUPS"),]
d <- mdata; d = mdata[which(grepl("cfgroups",mdata$dataset)),]
d <- mdata; d = mdata[which(mdata$corpus=="NG"),]
d <- mdata; d = mdata[which(mdata$corpus=="DREDZE"),]
d <- mdata; d = mdata[which(mdata$corpus=="R8"),]
d <- mdata; d = mdata[which(mdata$corpus=="R52"),]
d <- mdata; d = d[which(d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("w2v",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("cslda",d$algorithm)),]
d <- mdata; d = d[which(grepl("d2v",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("lda",d$algorithm) | d$algorithm=="baseline" | d$algorithm=="varmomresp" | d$algorithm=="varlogresp" | d$algorithm=="cslda"),]
d <- mdata; d = d[which(grepl("discrim",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="varlogresp" | d$algorithm=="logresp_m" | d$algorithm=="baseline"),]
d <- mdata; d = d[which(grepl("logresp",d$algorithm)),]
d <- mdata; d = d[which(grepl("discrim",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="logresp"),]
d <- mdata; d = d[which(d$algorithm=="cslda_s" | grepl("w2v",d$algorithm)),]
d <- mdata; d = d[which(d$algorithm=="varlogresp_w2v" | d$algorithm=="varlogresp_d2v" | d$algorithm=="cslda_s" | d$algorithm=="baseline" | d$algorithm=="varlogresp"),]
facets <- "~annotator_accuracy~corpus~vary_annotator_rates"
xvarname <- "num_annotations"
# one-off diagnostic plots over the chosen subset 'd'; each line plots a
# different result column
plotAlgorithms(d,"labeled_acc","Inferred Label Accuracy",ymin=0.,ymax=1,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"unlabeled_acc","Unlabeled Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"heldout_acc","Test Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"log_joint","Inferred Label Accuracy",ymin=min(d$log_joint),ymax=max(d$log_joint),facets=facets,xvarname=xvarname)
plotAlgorithms(d,"overall_acc","Overall Accuracy",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"btheta","BTheta",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"bgamma","BGamma",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"cgamma","CGamma",ymin=0,ymax=50,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"bphi","BPhi",ymin=0,ymax=2,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"top3_labeled_acc","Top 3 Labeled Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"annacc_rmse","Annotator RMSE",ymin=0,ylabel="Annotator RMSE",facets=facets,xvarname=xvarname)
plotAlgorithms(d,"annacc_mat_rmse","Annotator Matrix RMSE",ymin=0,ymax=.2,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"num_annotations","Inferred Label Accuracy",ymin=0,facets=facets,xvarname=xvarname)
plotAlgorithms(d,"num_documents_with_annotations","Docs with Annotations",ymin=0,facets=facets,xvarname=xvarname)
# quick scatter: does higher joint probability track higher accuracy?
plot(d$log_joint, d$labeled_acc)
# machine-accuracy diagnostics (not defined for plain itemresp/momresp rows)
j = d[which(d$algorithm!='itemresp' & d$algorithm!='momresp'),]
plotAlgorithms(j,"machacc_rmse","Machine RMSE",ymin=0)
plotAlgorithms(j,"machacc_mat_rmse","Machine MAT RMSE")
#############################################################################
# Enabling Crowdsourcing with document representations 2015
#############################################################################
# The per-corpus sections below are copy-paste variants of the same template;
# only the y-axis range, the algorithm subset, the facets, and the output
# file differ between them.
height = 9
######################### newsgroups ###############################
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.55
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data: keep only the algorithms shown in the figure, then map internal names
# to the pretty legend names (mapvalues is from plyr, loaded via massageData)
mdata <- massageData(data);
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="varmomresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="NEWSGROUPS"),])
ggsave("../images/newsgroups.eps",width=width,height=height,units='cm')
######################### cfgroups1000 ###############################
# Same template as the newsgroups section; differs in y range, the extra
# ~dataset facet, and the output file.
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.0
ymax = 0.75
shapesize = 3
xvarname = "num_annotations"
# data: keep figure algorithms, rename to legend names, fix legend order
mdata <- massageData(data);
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="varmomresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus~dataset", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="CFGROUPS1000"),])
ggsave("../images/cfgroups1000.eps",width=width,height=height,units='cm')
##################################### general tweet sentiment ###################################
# Same template; adds ItemResp to the algorithm subset.
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.6
ymax = 0.8
shapesize = 3
xvarname = "num_annotations"
# data (the commented lines are alternative subsets kept for reference)
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="varmomresp" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="TWITTER_SENT"),])
ggsave("../images/twittersentiment.eps",width=width,height=height,units='cm')
######################################## weather tweet sentiment #######################################
# Same template as the general tweet-sentiment section; only y range,
# corpus, and output file differ.
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.55
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data (commented lines are alternative subsets kept for reference)
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varlogresp" | mdata$algorithm=="cslda_s" | mdata$algorithm=="varmomresp" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="WEATHER"),])
ggsave("../images/weather.eps",width=width,height=height,units='cm')
######################################## compatibility #######################################
# Same template; merges varitemresp_w2v into varitemresp, restricts to three
# algorithms, and drops the earliest annotation counts (<= 5000).
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.93
ymax = 1
shapesize = 3
xvarname = "num_annotations"
# data: fold the w2v itemresp variant into plain varitemresp for this figure
mdata <- massageData(data); mdata$algorithm[which(mdata$algorithm=="varitemresp_w2v")] <- "varitemresp" # strip useless itemresp_w2v
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$num_annotations>5000),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="COMPATIBILITY"),])
ggsave("../images/compatibility.eps",width=width,height=height,units='cm')
############################################# tweet paraphrase dataset ##############################################################
# Same template; three-algorithm subset, paraphrase corpus, its own y range.
data = read.csv("2015-06-25-taacl.csv")
levels=c('LogResp+w2v','LogResp','csLDA','MomResp','ItemResp','Majority') # determined line order in legend
alg_colors=c('csLDA'='#00BEC4', 'Majority'="#000000", 'MomResp'="#B69E00", 'LogResp'="#609BFF", 'LogResp+w2v'="#F8766D", 'ItemResp'='#00B937')
alg_shapes=c('csLDA'=3, 'Majority'=1, 'MomResp'=17, 'LogResp'=18, 'LogResp+w2v'=5, 'ItemResp'=6 )
width = 13
ymin = 0.75
ymax = 0.95
shapesize = 3
xvarname = "num_annotations"
# data (commented lines are alternative subsets kept for reference)
mdata <- massageData(data); mdata <- mdata[which(!grepl("itemresp_",mdata$algorithm)),] # strip useless itemresp_w2v
# mdata <- mdata[which(grepl("discrim",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("itemresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("momresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("logresp",mdata$algorithm) | mdata$algorithm=="baseline"),]
# mdata <- mdata[which(grepl("cslda",mdata$algorithm) | mdata$algorithm=="baseline"),]
mdata <- mdata[which(mdata$algorithm=="varlogresp_w2v" | mdata$algorithm=="varitemresp" | mdata$algorithm=="baseline"),]
mdata$algorithm <- mapvalues(mdata$algorithm, from=c('baseline','varlogresp','varlogresp_w2v','cslda_s','varitemresp','varmomresp'), to=c('Majority','LogResp','LogResp+w2v','csLDA','ItemResp','MomResp')) # rename
mdata$algorithm <- factor(mdata$algorithm, levels=levels) # reorder
# wrapper fixing all plot parameters except the data subset
plotty <- function(d,hide_legend=FALSE){
plotAlgorithms(d,"labeled_acc","",xvarname=xvarname,ymin=ymin,ymax=ymax,facets="~corpus", shapesize=shapesize, algorithm_colors=alg_colors, algorithm_shapes=alg_shapes,
hide_legend=hide_legend,xlabel="Number of annotations x 1,000")
}
plotty(mdata[which(mdata$corpus=="TWITTER_PARA"),])
ggsave("../images/twitterparaphrase.eps",width=width,height=height,units='cm')
|
# Audit script: fetch an OpenML dataset, fit an mlr learner under 5-fold CV,
# and record aggregated metrics plus session info. The '#:#' markers appear
# to be section tags consumed by an external tooling pipeline — TODO confirm.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
# downloads the dataset from OpenML (network access required)
dataset <- getOMLDataSet(data.name = "fri_c3_1000_10")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
# hash of (task, learner) identifying this model configuration
#:# c2730ddcd6a3910626abc7206f7f8e33
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# capture the R session (package versions) alongside the results
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_fri_c3_1000_10/classification_binaryClass/c2730ddcd6a3910626abc7206f7f8e33/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 691
|
r
|
# Audit script (duplicate copy): fetch an OpenML dataset, fit an mlr learner
# under 5-fold CV, and record aggregated metrics plus session info. The '#:#'
# markers appear to be section tags for an external pipeline — TODO confirm.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
# downloads the dataset from OpenML (network access required)
dataset <- getOMLDataSet(data.name = "fri_c3_1000_10")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
# hash of (task, learner) identifying this model configuration
#:# c2730ddcd6a3910626abc7206f7f8e33
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# capture the R session (package versions) alongside the results
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/H5constants.R
\name{h5types}
\alias{h5types}
\title{These are all types that are used in HDF5}
\description{
HDF5 provides many native datatypes. These are all stored in the \code{h5types}
environment. An overview of all available types can be seen using \code{h5types$overview}.
Any specific type can be accessed using the \code{$}-operator. See also the examples below.
}
\examples{
h5types$overview
h5types$H5T_NATIVE_INT
h5types$H5T_NATIVE_DOUBLE
}
\author{
Holger Hoefling
}
|
/man/h5types.Rd
|
permissive
|
hhoeflin/hdf5r
|
R
| false
| true
| 558
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/H5constants.R
\name{h5types}
\alias{h5types}
\title{These are all types that are used in HDF5}
\description{
HDF5 provides many native datatypes. These are all stored in the \code{h5types}
environment. An overview of all available types can be seen using \code{h5types$overview}.
Any specific type can be accessed using the \code{$}-operator. See also the examples below.
}
\examples{
h5types$overview
h5types$H5T_NATIVE_INT
h5types$H5T_NATIVE_DOUBLE
}
\author{
Holger Hoefling
}
|
#!/usr/bin/env Rscript
##
## Copyright (C) 2016 Ezequiel Miron <eze.miron@bioch.ox.ac.uk>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# NOTE(review): the readline() result is immediately overwritten by getwd(),
# so the prompt only serves as a pause/reminder; the working directory in
# effect when the script starts is what is actually used.
dirchosen <- readline(prompt = "This script should be run from the results directory as the working directory. \n ie: \"~/Documents/papers/chrom_marks/results\".\nIf already in results directory hit enter\n")
dirchosen <- getwd()
# cell type / condition selecting which results subtree to summarise
##celltype = "Cardiomyocytes"
##condition = "37C"
celltype = "C127"
condition = "G1"
topPATH = paste(celltype, condition, sep = "/")
# enumerate result subdirectories; [2:length(...)] drops the first entry,
# which is the top-level directory itself
dir_names <- list.dirs(topPATH, recursive = TRUE)
#dir_names <- list.dirs("C127/sub2")
dir_names <- dir_names[2:length(dir_names)]
# parallel list of directory names relative to topPATH (used as variable names)
var_names <- list.dirs(topPATH, recursive = TRUE, full.names=FALSE)
#var_names <- list.dirs("C127/sub2", full.names=FALSE)
var_names <- var_names[2:length(var_names)]
# For every result subdirectory: read the per-cell d2b CSVs found there and
# write three summary CSVs (normalised d2b profile, absolute marker/random
# fit parameters, and the positive-distance "network" profile).
# 'd2b' presumably means distance-to-boundary — TODO confirm with the
# upstream analysis that produces these CSVs.
for (dir_name in dir_names){
# enter the subdirectory so list.files/write.csv operate locally
newdir <- paste(dirchosen, dir_name, sep ="/")
setwd(newdir)
dir_num <- which(dir_names == dir_name)
var_name <- var_names[dir_num]
# accumulators: df/df4 start with the standard distance axis (-400..400);
# df2a/b and df3a/b collect per-cell fit parameters (first row is a dummy NA)
stddist <- seq(-400,400)
df <- as.data.frame(stddist)
df2a <- as.data.frame(NA)
df2b <- df2a
df3a <- df2a
df3b <- df2a
df4 <- df
# names(df2) <- c(NA,NA)
allfilesandpaths <- list.files(pattern= var_name, full.names=TRUE);
if (length(allfilesandpaths) > 0){
###########################
#for normalised d2b only:
filesandpaths <- grep("*_-d2bnorm.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths) > 0){
for (fileandpath in filesandpaths){
if (length(fileandpath) > 0){
# colClasses keeps only the 5th column of each per-cell CSV
d2b <- read.csv(fileandpath, header=TRUE,colClasses=c("NULL","NULL","NULL","NULL",NA,"NULL"))
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
# names(d2b) <- rep(NA, ncol(d2b))
# one column per cell, aligned on the stddist axis
df <- cbind(df,d2b)
}
}
# row-wise mean across cells (column 1 is the distance axis, hence 2:length)
df <- transform(df, AvNorm = rowMeans(df[,2:length(df)], na.rm = TRUE))
#log base 2 of the average:
df <- transform(df, Log2AvNorm = log2(df$AvNorm))
#standard deviation of the average:
# each transform below appends a column, so the upper slice bound shrinks
# by one per step to keep excluding the derived columns
df <- transform(df, StDv = apply (df[,2:(length(df)-2)], 1, sd, na.rm =TRUE))
#95% confidence interval:
df <- transform(df, CI95 = apply(df[,2:(length(df)-3)], 1, function(x){1.96*sd(x)/sqrt(length(x))}))
#lower and upper 95% confidence intervals:
df <- transform(df, negCI95 = apply(df[,2:(length(df)-4)], 1, function(x){mean(x)+(-1.96)*sd(x)/sqrt(length(x))}))
df <- transform(df, posCI95 = apply(df[,2:(length(df)-5)], 1, function(x){mean(x)+c(1.96)*sd(x)/sqrt(length(x))}))
#lower and upper log errors for the log average from the 95CIs:
df <- transform(df, lowLogErr = df$Log2AvNorm - sqrt((df$Log2AvNorm-log2(df$negCI95))^2))
df <- transform(df, uprLogErr = df$Log2AvNorm + sqrt((df$Log2AvNorm-log2(df$posCI95))^2))
##to plot averaged then logged results with shaded error bands:
#library(ggplot2)
#p<-ggplot(data=df, aes(x=df$stddist, y=df$Log2AvNorm)) + geom_line()
#p<-p+geom_ribbon(aes(ymin=df$lowLogErr, ymax=df$uprLogErr), linetype=2, alpha=0.1)
#p
#p<-ggplot(data=data, aes(x=interval, y=OR, colour=Drug)) + geom_point() + geom_line()
#p<-p+geom_ribbon(aes(ymin=data$lower, ymax=data$upper), linetype=2, alpha=0.1)
savename <- paste0(var_name,"_mask_d2bnorm-summary.csv")
write.csv(df,savename);
}
###########################
#for absolute marker and random d2b only:
filesandpaths2 <- grep("_d2bfit.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths2) > 0){
numcells <- 0
for (fileandpath2 in filesandpaths2){
if (length(fileandpath2) > 0){
fitdf <- read.csv(fileandpath2, header=TRUE)
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
# mfitted/rfitted rows 1 and 2 hold the per-cell (mu, sigma) fit parameters
df2a <- rbind(df2a, fitdf$mfitted[1])
df2b <- rbind(df2b, fitdf$mfitted[2])
df3a <- rbind(df3a, fitdf$rfitted[1])
df3b <- rbind(df3b, fitdf$rfitted[2])
numcells <- numcells + 1
}
}
# row 1 of each accumulator is the dummy NA, hence the 2:nrow slices
Markermu <- mean(df2a[2:nrow(df2a),1])
Randommu <- mean(df3a[2:nrow(df3a),1])
#to get the average standard deviation you have to: square them to get the variance for each, then get their average (ie, sum them and divide by the number of inputs) and finally square root them. This is NOT the same as just taking the mean of all the standard deviations!
Markersigma <- sqrt(sum(df2b[2:nrow(df2b),1]^2)/(nrow(df2b)-1))
Randomsigma <- sqrt(sum(df3b[2:nrow(df3b),1]^2)/(nrow(df3b)-1))
Mabsdf <- cbind(Markermu,Markersigma,numcells)
rownames(Mabsdf) <- var_name
Rabsdf <- cbind(Randommu,Randomsigma,numcells)
rownames(Rabsdf) <- var_name
savename2 <- paste0(var_name,"_mask_abs-md2b_summary.csv")
write.csv(Mabsdf,savename2);
savename3 <- paste0(var_name,"_mask_abs-rd2b_summary.csv")
write.csv(Rabsdf,savename3);
}
###########################
#for POSITIVE fitted random d2b only, metric for chromatin network width:
filesandpaths <- grep("*_d2bnorm.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths) > 0){
for (fileandpath in filesandpaths){
if (length(fileandpath) > 0){
# here colClasses keeps the 4th column instead of the 5th
d2b <- read.csv(fileandpath, header=TRUE,colClasses=c("NULL","NULL","NULL",NA,"NULL","NULL"))
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
# names(d2b) <- rep(NA, ncol(d2b))
df4 <- cbind(df4,d2b)
}
}
#remove negative distances (keep stddist >= 0):
df4ind <- df4$stddist > -1
df4 <- df4[df4ind,]
df4 <- transform(df4, AvNetwork = rowMeans(df4[,2:length(df4)], na.rm = TRUE))
df4 <- transform(df4, StDv = apply (df4[,2:(length(df4)-1)], 1, sd, na.rm =TRUE))
#95% confidence interval:
df4 <- transform(df4, CI95 = apply(df4[,2:(length(df4)-2)], 1, function(x){1.96*sd(x)/sqrt(length(x))}))
#lower and upper 95% confidence intervals:
df4 <- transform(df4, negCI95 = apply(df4[,2:(length(df4)-3)], 1, function(x){mean(x)+(-1.96)*sd(x)/sqrt(length(x))}))
df4 <- transform(df4, posCI95 = apply(df4[,2:(length(df4)-4)], 1, function(x){mean(x)+c(1.96)*sd(x)/sqrt(length(x))}))
##to plot averaged then logged results with shaded error bands:
#library(ggplot2)
#p<-ggplot(data=df, aes(x=df$stddist, y=df$Log2AvNorm)) + geom_line()
#p<-p+geom_ribbon(aes(ymin=df$lowLogErr, ymax=df$uprLogErr), linetype=2, alpha=0.1)
#p
#p<-ggplot(data=data, aes(x=interval, y=OR, colour=Drug)) + geom_point() + geom_line()
#p<-p+geom_ribbon(aes(ymin=data$lower, ymax=data$upper), linetype=2, alpha=0.1)
savename <- paste0(var_name,"_mask_network-summary.csv")
write.csv(df4,savename);
}
}
}
|
/3chan/ro_scripts/S-phase/summaryd2b.R
|
no_license
|
ezemiron/Chain
|
R
| false
| false
| 7,490
|
r
|
#!/usr/bin/env Rscript
##
## Copyright (C) 2016 Ezequiel Miron <eze.miron@bioch.ox.ac.uk>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
dirchosen <- readline(prompt = "This script should be run from the results directory as the working directory. \n ie: \"~/Documents/papers/chrom_marks/results\".\nIf already in results directory hit enter\n")
dirchosen <- getwd()
##celltype = "Cardiomyocytes"
##condition = "37C"
celltype = "C127"
condition = "G1"
topPATH = paste(celltype, condition, sep = "/")
dir_names <- list.dirs(topPATH, recursive = TRUE)
#dir_names <- list.dirs("C127/sub2")
dir_names <- dir_names[2:length(dir_names)]
var_names <- list.dirs(topPATH, recursive = TRUE, full.names=FALSE)
#var_names <- list.dirs("C127/sub2", full.names=FALSE)
var_names <- var_names[2:length(var_names)]
for (dir_name in dir_names){
newdir <- paste(dirchosen, dir_name, sep ="/")
setwd(newdir)
dir_num <- which(dir_names == dir_name)
var_name <- var_names[dir_num]
stddist <- seq(-400,400)
df <- as.data.frame(stddist)
df2a <- as.data.frame(NA)
df2b <- df2a
df3a <- df2a
df3b <- df2a
df4 <- df
# names(df2) <- c(NA,NA)
allfilesandpaths <- list.files(pattern= var_name, full.names=TRUE);
if (length(allfilesandpaths) > 0){
###########################
#for normalised d2b only:
filesandpaths <- grep("*_-d2bnorm.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths) > 0){
for (fileandpath in filesandpaths){
if (length(fileandpath) > 0){
d2b <- read.csv(fileandpath, header=TRUE,colClasses=c("NULL","NULL","NULL","NULL",NA,"NULL"))
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
# names(d2b) <- rep(NA, ncol(d2b))
df <- cbind(df,d2b)
}
}
df <- transform(df, AvNorm = rowMeans(df[,2:length(df)], na.rm = TRUE))
#log base 2 of the average:
df <- transform(df, Log2AvNorm = log2(df$AvNorm))
#standard deviation of the average:
df <- transform(df, StDv = apply (df[,2:(length(df)-2)], 1, sd, na.rm =TRUE))
#95% confidence interval:
df <- transform(df, CI95 = apply(df[,2:(length(df)-3)], 1, function(x){1.96*sd(x)/sqrt(length(x))}))
#lower and upper 95% confidence intervals:
df <- transform(df, negCI95 = apply(df[,2:(length(df)-4)], 1, function(x){mean(x)+(-1.96)*sd(x)/sqrt(length(x))}))
df <- transform(df, posCI95 = apply(df[,2:(length(df)-5)], 1, function(x){mean(x)+c(1.96)*sd(x)/sqrt(length(x))}))
#lower and upper log errors for the log average from the 95CIs:
df <- transform(df, lowLogErr = df$Log2AvNorm - sqrt((df$Log2AvNorm-log2(df$negCI95))^2))
df <- transform(df, uprLogErr = df$Log2AvNorm + sqrt((df$Log2AvNorm-log2(df$posCI95))^2))
##to plot averaged then logged results with shaded error bands:
#library(ggplot2)
#p<-ggplot(data=df, aes(x=df$stddist, y=df$Log2AvNorm)) + geom_line()
#p<-p+geom_ribbon(aes(ymin=df$lowLogErr, ymax=df$uprLogErr), linetype=2, alpha=0.1)
#p
#p<-ggplot(data=data, aes(x=interval, y=OR, colour=Drug)) + geom_point() + geom_line()
#p<-p+geom_ribbon(aes(ymin=data$lower, ymax=data$upper), linetype=2, alpha=0.1)
savename <- paste0(var_name,"_mask_d2bnorm-summary.csv")
write.csv(df,savename);
}
###########################
#for absolute marker and random d2b only:
filesandpaths2 <- grep("_d2bfit.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths2) > 0){
numcells <- 0
for (fileandpath2 in filesandpaths2){
if (length(fileandpath2) > 0){
fitdf <- read.csv(fileandpath2, header=TRUE)
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
df2a <- rbind(df2a, fitdf$mfitted[1])
df2b <- rbind(df2b, fitdf$mfitted[2])
df3a <- rbind(df3a, fitdf$rfitted[1])
df3b <- rbind(df3b, fitdf$rfitted[2])
numcells <- numcells + 1
}
}
Markermu <- mean(df2a[2:nrow(df2a),1])
Randommu <- mean(df3a[2:nrow(df3a),1])
#to get the average standard deviation you have to: square them to get the variance for each, then get their average (ie, sum them and divide by the number of inputs) and finally square root them. This is NOT the same as just taking the mean of all the standard deviations!
Markersigma <- sqrt(sum(df2b[2:nrow(df2b),1]^2)/(nrow(df2b)-1))
Randomsigma <- sqrt(sum(df3b[2:nrow(df3b),1]^2)/(nrow(df3b)-1))
Mabsdf <- cbind(Markermu,Markersigma,numcells)
rownames(Mabsdf) <- var_name
Rabsdf <- cbind(Randommu,Randomsigma,numcells)
rownames(Rabsdf) <- var_name
savename2 <- paste0(var_name,"_mask_abs-md2b_summary.csv")
write.csv(Mabsdf,savename2);
savename3 <- paste0(var_name,"_mask_abs-rd2b_summary.csv")
write.csv(Rabsdf,savename3);
}
###########################
#for POSITIVE fitted random d2b only, metric for chromatin network width:
filesandpaths <- grep("*_d2bnorm.csv",allfilesandpaths, value = TRUE)
if (length(filesandpaths) > 0){
for (fileandpath in filesandpaths){
if (length(fileandpath) > 0){
d2b <- read.csv(fileandpath, header=TRUE,colClasses=c("NULL","NULL","NULL",NA,"NULL","NULL"))
# cell_num <- which(filesandpaths == fileandpath)
# d2b$cell <- cell_num
# names(d2b) <- rep(NA, ncol(d2b))
df4 <- cbind(df4,d2b)
}
}
#remove negative distances:
df4ind <- df4$stddist > -1
df4 <- df4[df4ind,]
df4 <- transform(df4, AvNetwork = rowMeans(df4[,2:length(df4)], na.rm = TRUE))
df4 <- transform(df4, StDv = apply (df4[,2:(length(df4)-1)], 1, sd, na.rm =TRUE))
#95% confidence interval:
df4 <- transform(df4, CI95 = apply(df4[,2:(length(df4)-2)], 1, function(x){1.96*sd(x)/sqrt(length(x))}))
#lower and upper 95% confidence intervals:
df4 <- transform(df4, negCI95 = apply(df4[,2:(length(df4)-3)], 1, function(x){mean(x)+(-1.96)*sd(x)/sqrt(length(x))}))
df4 <- transform(df4, posCI95 = apply(df4[,2:(length(df4)-4)], 1, function(x){mean(x)+c(1.96)*sd(x)/sqrt(length(x))}))
##to plot averaged then logged results with shaded error bands:
#library(ggplot2)
#p<-ggplot(data=df, aes(x=df$stddist, y=df$Log2AvNorm)) + geom_line()
#p<-p+geom_ribbon(aes(ymin=df$lowLogErr, ymax=df$uprLogErr), linetype=2, alpha=0.1)
#p
#p<-ggplot(data=data, aes(x=interval, y=OR, colour=Drug)) + geom_point() + geom_line()
#p<-p+geom_ribbon(aes(ymin=data$lower, ymax=data$upper), linetype=2, alpha=0.1)
savename <- paste0(var_name,"_mask_network-summary.csv")
write.csv(df4,savename);
}
}
}
|
# Exercise script: line plot of Indian suicide counts with ggplot2.
# NOTE(review): assumes "suicides.rdata" exists in the working directory and
# presumably loads a data.table named `suicides` (copy() and the single-index
# subsetting below are data.table idioms) — confirm against the data file.
library(ggplot2)
library(data.table)
library(reshape2)
library(dplyr)
load("suicides.rdata")
# Treat age as categorical so each age group gets its own line/color.
suicides$age <- as.factor(suicides$age)
# Keep an untouched copy before aggregating (used in the extra-credit part).
all_suicides <- copy(suicides)
# Sum deaths within each year/age/sex group (mutate keeps all rows).
suicides <- suicides %>%
group_by(year, age, sex) %>%
mutate(deaths = sum(deaths))
# Make a line plot of suicides by age
# (year on the x axis, deaths on the y axis, different line for each age).
# facet by sex.
line_by_age <- ggplot(suicides, aes(x=year, y=deaths, color=age)) +
geom_line(size = 2) +
facet_wrap(~sex, scales = "free")
##extra credit####
# Subset to one state, then sum deaths per year/state/sex/age/means.
one_state <- all_suicides[all_suicides$state=="Uttar Pradesh"] %>%
group_by(year, state, sex, age, means) %>%
mutate(deaths = sum(deaths))
# Make a set of density plots faceted by sex and means of suicide,
# showing distributions of suicides by age, for the state of Uttar Pradesh.
# Label appropriately.
/ggplot/ggplot_exercise.r
|
no_license
|
hayleyyounghubby/intro_to_r
|
R
| false
| false
| 855
|
r
|
library(ggplot2)
library(data.table)
library(reshape2)
library(dplyr)
load("suicides.rdata")
suicides$age <- as.factor(suicides$age)
all_suicides <- copy(suicides)
suicides <- suicides %>%
group_by(year, age, sex) %>%
mutate(deaths = sum(deaths))
# Make a line plot of suicides by age
# (year on the x axis, deaths on the y axis, different line for each age).
# facet by sex.
line_by_age <- ggplot(suicides, aes(x=year, y=deaths, color=age)) +
geom_line(size = 2) +
facet_wrap(~sex, scales = "free")
##extra credit####
one_state <- all_suicides[all_suicides$state=="Uttar Pradesh"] %>%
group_by(year, state, sex, age, means) %>%
mutate(deaths = sum(deaths))
# Make a set of density plots faceted by sex and means of suicide,
# showing distributions of suicides by age, for the state of Uttar Pradesh.
# Label appropriately.
|
# Set the working directory for this project (chapter 22, project 1).
# NOTE(review): hard-coded absolute path — this only works on the original
# author's machine / drive mapping; prefer project-relative paths.
setwd('Z:/FCD/BigDataRAzure/Cap22/Projeto1')
getwd()
|
/fraud-detection.R
|
no_license
|
edipo89/fraud-detection
|
R
| false
| false
| 89
|
r
|
# Definindo o diretório de trabalho
setwd('Z:/FCD/BigDataRAzure/Cap22/Projeto1')
getwd()
|
/NoteBook_R/Vedio_Note_炼数成金/day_01_综合型案例.R
|
no_license
|
moss1225/R_practice_program_1
|
R
| false
| false
| 1,478
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/noise_method.R
\docType{methods}
\name{noise}
\alias{noise}
\alias{noise,DocumentTermMatrix-method}
\alias{noise,TermDocumentMatrix-method}
\alias{noise,character-method}
\alias{noise,textstat-method}
\title{detect noise}
\usage{
noise(.Object, ...)
\S4method{noise}{DocumentTermMatrix}(.Object, minTotal = 2,
minTfIdfMean = 0.005, sparse = 0.995, stopwordsLanguage = "german",
minNchar = 2, specialChars = getOption("polmineR.specialChars"),
numbers = "^[0-9\\\\.,]+$", verbose = TRUE)
\S4method{noise}{TermDocumentMatrix}(.Object, ...)
\S4method{noise}{character}(.Object, stopwordsLanguage = "german",
minNchar = 2, specialChars = getOption("polmineR.specialChars"),
numbers = "^[0-9\\\\.,]+$", verbose = TRUE)
\S4method{noise}{textstat}(.Object, pAttribute, ...)
}
\arguments{
\item{.Object}{an .Object of class \code{"DocumentTermMatrix"}}
\item{...}{further parameters}
\item{minTotal}{minimum colsum (for DocumentTermMatrix) to qualify a term as non-noise}
\item{minTfIdfMean}{minimum mean value for tf-idf to qualify a term as non-noise}
\item{sparse}{will be passed into \code{"removeSparseTerms"} from \code{"tm"}-package}
\item{stopwordsLanguage}{e.g. "german", to get stopwords defined in the tm package}
\item{minNchar}{minimum character length to qualify a term as non-noise}
\item{specialChars}{special characters to drop}
\item{numbers}{regex, to drop numbers}
\item{verbose}{logical}
\item{pAttribute}{relevant if applied to a textstat object}
}
\value{
a list
}
\description{
detect noise
}
|
/man/noise.Rd
|
no_license
|
stefan-mueller/polmineR
|
R
| false
| true
| 1,603
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/noise_method.R
\docType{methods}
\name{noise}
\alias{noise}
\alias{noise,DocumentTermMatrix-method}
\alias{noise,TermDocumentMatrix-method}
\alias{noise,character-method}
\alias{noise,textstat-method}
\title{detect noise}
\usage{
noise(.Object, ...)
\S4method{noise}{DocumentTermMatrix}(.Object, minTotal = 2,
minTfIdfMean = 0.005, sparse = 0.995, stopwordsLanguage = "german",
minNchar = 2, specialChars = getOption("polmineR.specialChars"),
numbers = "^[0-9\\\\.,]+$", verbose = TRUE)
\S4method{noise}{TermDocumentMatrix}(.Object, ...)
\S4method{noise}{character}(.Object, stopwordsLanguage = "german",
minNchar = 2, specialChars = getOption("polmineR.specialChars"),
numbers = "^[0-9\\\\.,]+$", verbose = TRUE)
\S4method{noise}{textstat}(.Object, pAttribute, ...)
}
\arguments{
\item{.Object}{an .Object of class \code{"DocumentTermMatrix"}}
\item{...}{further parameters}
\item{minTotal}{minimum colsum (for DocumentTermMatrix) to qualify a term as non-noise}
\item{minTfIdfMean}{minimum mean value for tf-idf to qualify a term as non-noise}
\item{sparse}{will be passed into \code{"removeSparseTerms"} from \code{"tm"}-package}
\item{stopwordsLanguage}{e.g. "german", to get stopwords defined in the tm package}
\item{minNchar}{minimum character length to qualify a term as non-noise}
\item{specialChars}{special characters to drop}
\item{numbers}{regex, to drop numbers}
\item{verbose}{logical}
\item{pAttribute}{relevant if applied to a textstat object}
}
\value{
a list
}
\description{
detect noise
}
|
# Copyright (c) 2020 Université catholique de Louvain Center for Operations Research and Econometrics (CORE) http://www.uclouvain.be
# Written by Pr Bart Jourquin, bart.jourquin@uclouvain.be
#
# A few convenience functions used in more than one script in this project
#
# Test if all the estimators are of the expected sign (must be negative)
allSignsAreExpected <- function(model) {
  # Returns TRUE when no non-intercept estimator of `model` is positive
  # (all estimators are expected to be negative).
  est <- coef(model)
  for (nm in names(est)) {
    # Coefficients whose name starts with "(" (e.g. "(Intercept)") are
    # exempt from the sign check.
    if (substring(nm, 1, 1) == "(") {
      next
    }
    if (est[nm] > 0) {
      return(FALSE)
    }
  }
  TRUE
}
# Get Pr(>|t|) of model for all coefficients
allCoefsAreSignificant <- function(model, nbStars, withSignificantIntercepts) {
  # Returns TRUE when every relevant coefficient of `model` reaches the
  # significance level implied by `nbStars` (3 -> 0.1%, 2 -> 1%, 1 -> 5%,
  # 0 -> 10%; any other value accepts everything). P-values use a normal
  # approximation of the Wald statistic, as in getStars().
  #
  # When withSignificantIntercepts is FALSE, the first two coefficients
  # (the two mode-specific intercepts) are excluded from the test.
  b <- coef(model, order = TRUE)
  std.err2 <- sqrt(diag(vcov(model)))
  std.err <- b
  std.err[names(std.err2)] <- std.err2
  z <- b / std.err
  p <- 2 * (1 - pnorm(abs(z)))
  # BUG FIX: a leftover debug line "startIdx = 1" previously overrode this
  # choice, silently making the withSignificantIntercepts argument dead.
  startIdx <- if (withSignificantIntercepts) 1 else 3
  if (startIdx > length(p)) {
    # Nothing left to test; also guards against the original backwards
    # 3:length(p) sequence when the model has fewer than 3 coefficients.
    return(TRUE)
  }
  threshold <- switch(as.character(nbStars),
    "3" = 0.001,
    "2" = 0.01,
    "1" = 0.05,
    "0" = 0.1,
    1
  )
  all(p[startIdx:length(p)] <= threshold)
}
# Get Pr(>|t|) of model for a given coefficient
getStars <- function(model, coefName) {
  # Significance code (R's conventional star notation) for the coefficient
  # named `coefName`, using a normal approximation of the Wald statistic.
  est <- coef(model, order = TRUE)
  se <- est
  se.diag <- sqrt(diag(vcov(model)))
  se[names(se.diag)] <- se.diag
  pval <- (2 * (1 - pnorm(abs(est / se))))[coefName]
  if (pval <= 0.001) return("***")
  if (pval <= 0.01) return("**")
  if (pval <= 0.05) return("*")
  if (pval <= 0.1) return(".")
  if (pval <= 1) return(" ")
}
# Returns TRUE if signs are expected and all the estimators are enough significant
isValid <- function(solution) {
  # A solution is valid when its signs are expected and its estimators are
  # significant enough.
  if ("keep" %in% colnames(solution)) {
    # Stored brute-force results (HeuristicVsBruteForce.R) carry an
    # explicit "keep" flag.
    return(solution$keep)
  }
  # Live heuristic results (Heuristic.R): valid when no error was recorded.
  solution$error == ""
}
# Returns TRUE if all the signs are expected
hasExpectedSigns <- function(solution) {
  # TRUE when no sign error was recorded for this solution.
  solution$error == ""
}
# Make a key from a combination of Lambdas
getKey <- function(lambdas) {
  # Concatenate the lambda values into a single lookup key.
  # BUG FIX: the original 1:length(lambdas) loop produced "NANA" for an
  # empty input (1:0 iterates over 1 and 0); paste(..., collapse = "") is
  # the safe, vectorized equivalent and returns "" for empty input.
  paste(lambdas, collapse = "")
}
# Returns the lambdas of a given solution
getLambdas <- function(solution, nbVariables) {
  # Returns the first `nbVariables` Box-Cox lambdas stored in `solution`,
  # in the fixed order: cost, duration, length.
  # FIX: replaces eval(parse(text = ...)) plus a `<<-` assignment (which
  # leaked a `value` binding into the global environment) with plain
  # list/column extraction; no global side effect anymore.
  fields <- c("lambda.cost", "lambda.duration", "lambda.length")
  vapply(fields[seq_len(nbVariables)],
         function(f) solution[[f]],
         numeric(1),
         USE.NAMES = FALSE)
}
# Draw a random combination of lambda's
randomDrawLambdas <- function(nbLambdas, range, granularity) {
  # Draws `nbLambdas` random values on the lambda grid
  # -range, -range + granularity, ..., rounded to one decimal.
  # NOTE(review): `nbSteps` is a free variable resolved in the enclosing
  # environment — confirm it is defined (and consistent with range and
  # granularity) before this function is called.
  # FIX: 1:nbLambdas iterated over c(1, 0) when nbLambdas == 0, and the
  # result vector was grown with c(); seq_len() + vapply() fixes both.
  vapply(seq_len(nbLambdas), function(i) {
    step <- sample(1:nbSteps, 1)
    round(-range - granularity + step * granularity, 1)
  }, numeric(1))
}
# Identify, in the brute force results, the solutions with a given signif level.
# One can decide to test or not the signif level of the intercepts.
# (This is a piece of ugly code that could be improved)
#
# bfSolution : dataframe that contains the brute force solutions
# nbVariables : 1, 2 or 3
# minSigLevel : minimum signif. level to retain for the estimators (# " " = 1, ." = 0, "*" = 1, "**" = 2, "***" = 3)
# withSignificantIntercepts : if TRUE, the signif. levels of the intercepts must also be larger or equal that minSigLevel
markValidSolutions <- function(bfSolutions, nbVariables, minSigLevel, withSignificantIntercepts) {
  # Marks (logical column "keep") the brute-force solutions whose estimators
  # all have the expected negative sign and whose significance codes reach
  # `minSigLevel` (" " -> -1, "." -> 0, "*" -> 1, "**" -> 2, "***" -> 3).
  # When withSignificantIntercepts is TRUE, the two intercept codes
  # (sig.const.iww, sig.const.rail) must also reach that level.
  # Also initialises a "best" column to FALSE.
  #
  # BUG FIX: the body previously compared against a global `minSignifLevel`
  # instead of the `minSigLevel` parameter, which was therefore ignored.
  # The five copy-pasted recoding blocks are replaced by one helper.

  # Map printed significance codes to ordered integer levels; unknown
  # codes (including " ") map to -1 and never qualify.
  starsToLevel <- function(stars) {
    codeLevels <- c("." = 0, "*" = 1, "**" = 2, "***" = 3)
    out <- codeLevels[as.character(stars)]
    out[is.na(out)] <- -1
    unname(out)
  }
  estimatorCols <- c("cost", "duration", "length")[seq_len(nbVariables)]
  significanceCols <- c("sig.cost", "sig.duration", "sig.length")[seq_len(nbVariables)]
  if (withSignificantIntercepts) {
    significanceCols <- c("sig.const.iww", "sig.const.rail", significanceCols)
  }
  signsOk <- Reduce(`&`, lapply(estimatorCols, function(cl) bfSolutions[[cl]] < 0))
  significanceOk <- Reduce(`&`, lapply(significanceCols, function(cl) {
    starsToLevel(bfSolutions[[cl]]) >= minSigLevel
  }))
  bfSolutions$keep <- signsOk & significanceOk
  bfSolutions$best <- FALSE
  bfSolutions
}
|
/_Utils.R
|
permissive
|
jourquin/Box-Cox-Lambdas-Heuristic
|
R
| false
| false
| 7,115
|
r
|
# Copyright (c) 2020 Université catholique de Louvain Center for Operations Research and Econometrics (CORE) http://www.uclouvain.be
# Written by Pr Bart Jourquin, bart.jourquin@uclouvain.be
#
# A few convenience functions used in more than one script in this project
#
# Test if all the estimators are of the expected sign (must be negative)
allSignsAreExpected <- function(model) {
c <- coef(model)
correctSign <- TRUE
# Browse de coefficients names (see output of "summary(model)")
for (j in 1:length(c)) {
name <- names(c[j])
if (substring(name, 1, 1) != "(") {
# "(Intercept)" must not be tested
if (c[name] > 0) {
correctSign <- FALSE
break
}
}
}
return(correctSign)
}
# Get Pr(>|t|) of model for all coefficients
allCoefsAreSignificant <- function(model, nbStars, withSignificantIntercepts) {
  # Returns TRUE when every relevant coefficient of `model` reaches the
  # significance level implied by `nbStars` (3 -> 0.1%, 2 -> 1%, 1 -> 5%,
  # 0 -> 10%; any other value accepts everything). P-values use a normal
  # approximation of the Wald statistic, as in getStars().
  #
  # When withSignificantIntercepts is FALSE, the first two coefficients
  # (the two mode-specific intercepts) are excluded from the test.
  b <- coef(model, order = TRUE)
  std.err2 <- sqrt(diag(vcov(model)))
  std.err <- b
  std.err[names(std.err2)] <- std.err2
  z <- b / std.err
  p <- 2 * (1 - pnorm(abs(z)))
  # BUG FIX: a leftover debug line "startIdx = 1" previously overrode this
  # choice, silently making the withSignificantIntercepts argument dead.
  startIdx <- if (withSignificantIntercepts) 1 else 3
  if (startIdx > length(p)) {
    # Nothing left to test; also guards against the original backwards
    # 3:length(p) sequence when the model has fewer than 3 coefficients.
    return(TRUE)
  }
  threshold <- switch(as.character(nbStars),
    "3" = 0.001,
    "2" = 0.01,
    "1" = 0.05,
    "0" = 0.1,
    1
  )
  all(p[startIdx:length(p)] <= threshold)
}
# Get Pr(>|t|) of model for a given coefficient
getStars <- function(model, coefName) {
b <- coef(model, order = TRUE)
std.err2 <- sqrt(diag(vcov(model)))
std.err <- b
std.err[names(std.err2)] <- std.err2
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
pp <- p[coefName]
if (pp <= 0.001) {
return("***")
}
if (pp <= 0.01) {
return("**")
}
if (pp <= 0.05) {
return("*")
}
if (pp <= 0.1) {
return(".")
}
if (pp <= 1) {
return(" ")
}
}
# Returns TRUE if signs are expected and all the estimators are enough significant
isValid <- function(solution) {
# When looking in stored results (HeuristicVsBruteForce.R)
if("keep" %in% colnames(solution)) {
return (solution$keep)
}
# When used in "real" conditions (Heuristic.R)
if (solution$error == "") {
return(TRUE)
}
return(FALSE)
}
# Returns TRUE if all the signs are expected
hasExpectedSigns <- function(solution) {
if (solution$error == "") {
return(TRUE)
}
return(FALSE)
}
# Make a key from a combination of Lambdas
getKey <- function(lambdas) {
  # Concatenate the lambda values into a single lookup key.
  # BUG FIX: the original 1:length(lambdas) loop produced "NANA" for an
  # empty input (1:0 iterates over 1 and 0); paste(..., collapse = "") is
  # the safe, vectorized equivalent and returns "" for empty input.
  paste(lambdas, collapse = "")
}
# Returns the lambdas of a given solution
getLambdas <- function(solution, nbVariables) {
  # Returns the first `nbVariables` Box-Cox lambdas stored in `solution`,
  # in the fixed order: cost, duration, length.
  # FIX: replaces eval(parse(text = ...)) plus a `<<-` assignment (which
  # leaked a `value` binding into the global environment) with plain
  # list/column extraction; no global side effect anymore.
  fields <- c("lambda.cost", "lambda.duration", "lambda.length")
  vapply(fields[seq_len(nbVariables)],
         function(f) solution[[f]],
         numeric(1),
         USE.NAMES = FALSE)
}
# Draw a random combination of lambda's
randomDrawLambdas <- function(nbLambdas, range, granularity) {
lambdas <- c()
for (j in 1:nbLambdas) {
z <- sample(1:nbSteps, 1)
lambda <- -range - granularity + (z * granularity)
lambdas <- c(lambdas, round(lambda, 1))
}
return(lambdas)
}
# Identify, in the brute force results, the solutions with a given signif level.
# One can decide to test or not the signif level of the intercepts.
# (This is a piece of ugly code that could be improved)
#
# bfSolution : dataframe that contains the brute force solutions
# nbVariables : 1, 2 or 3
# minSigLevel : minimum signif. level to retain for the estimators (# " " = 1, ." = 0, "*" = 1, "**" = 2, "***" = 3)
# withSignificantIntercepts : if TRUE, the signif. levels of the intercepts must also be larger or equal that minSigLevel
markValidSolutions <- function(bfSolutions, nbVariables, minSigLevel, withSignificantIntercepts) {
  # Marks (logical column "keep") the brute-force solutions whose estimators
  # all have the expected negative sign and whose significance codes reach
  # `minSigLevel` (" " -> -1, "." -> 0, "*" -> 1, "**" -> 2, "***" -> 3).
  # When withSignificantIntercepts is TRUE, the two intercept codes
  # (sig.const.iww, sig.const.rail) must also reach that level.
  # Also initialises a "best" column to FALSE.
  #
  # BUG FIX: the body previously compared against a global `minSignifLevel`
  # instead of the `minSigLevel` parameter, which was therefore ignored.
  # The five copy-pasted recoding blocks are replaced by one helper.

  # Map printed significance codes to ordered integer levels; unknown
  # codes (including " ") map to -1 and never qualify.
  starsToLevel <- function(stars) {
    codeLevels <- c("." = 0, "*" = 1, "**" = 2, "***" = 3)
    out <- codeLevels[as.character(stars)]
    out[is.na(out)] <- -1
    unname(out)
  }
  estimatorCols <- c("cost", "duration", "length")[seq_len(nbVariables)]
  significanceCols <- c("sig.cost", "sig.duration", "sig.length")[seq_len(nbVariables)]
  if (withSignificantIntercepts) {
    significanceCols <- c("sig.const.iww", "sig.const.rail", significanceCols)
  }
  signsOk <- Reduce(`&`, lapply(estimatorCols, function(cl) bfSolutions[[cl]] < 0))
  significanceOk <- Reduce(`&`, lapply(significanceCols, function(cl) {
    starsToLevel(bfSolutions[[cl]]) >= minSigLevel
  }))
  bfSolutions$keep <- signsOk & significanceOk
  bfSolutions$best <- FALSE
  bfSolutions
}
|
# Loading A1-1_pages.csv
pages_dataset<-read.csv("~/Desktop/Brandless/brandless_take_home_exercise_data/A1-1_pages.csv",header =TRUE)
# Top 10 Visited pages
top_10_visited_pages<-sqldf("SELECT count(path) as Count,Path from pages_dataset group by 2 order by 1 desc limit 10")
# Re-Ordering
top_10_visited_pages$path<-factor(top_10_visited_pages$path,levels = c("/","/category/food","/shop_all","/category/home-and-office","/category/beauty","/category/personal-care","/category/household-supplies","/category/health","/about","/checkout/email"))
# Top 10 pages visited in a ggplot
ggplot(top_10_visited_pages,aes(x=path,y=Count,label=Count))+labs(title="Top 10 pages visited",x="Path",y="# of times visited")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+geom_bar(stat="identity", width = 0.5, aes(fill=path))+geom_text(hjust=0.09,angle=45)+theme(text=element_text(size=10, family="Comic Sans MS"))
# Loading A1-1_tracks.csv
tracks_dataset<-read.csv("~/Desktop/Brandless/brandless_take_home_exercise_data/A1-1_tracks.csv",header =TRUE)
# Joining Tracks and Pages
tracks_and_pages_joined<-sqldf("SELECT count(a.event) as count, a.event as event, b.path FROM tracks_dataset a INNER JOIN top_10_visited_pages b ON a.context_page_path=b.path GROUP BY 2,3 order by count desc,path")
# Grouping and Ranking
tracks_and_pages_Rank<-tracks_and_pages_joined %>% group_by(path) %>% mutate(ranks=order(count,decreasing = TRUE))
# Top 5 tracks from Top 10 most visited pages
top_5_tracks<-filter(tracks_and_pages_Rank,ranks<6)
# Re-Ordering
top_5_tracks<-top_5_tracks %>% group_by(path) %>% arrange(desc(count),.by_group = TRUE)
# Visualizing Top 5 Events in Top 10 pages Visited
ggplot(top_5_tracks,aes(x=path,y=count,fill=event))+geom_bar(stat = "identity",position="fill")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+theme(text=element_text(size=10, family="Comic Sans MS"))+scale_fill_brewer(palette="Spectral")+labs(title="Top 5 Events Tracked",x="Path",y="# of times events Clicked")
# Visualizing Top 5 Events in Top 10 pages Visited by Percentage
ggplot(top_5_tracks,aes(x=path,y=count,fill=event))+geom_bar(stat = "identity",position="fill")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+theme(text=element_text(size=10, family="Comic Sans MS"))+scale_fill_brewer(palette="Spectral")+labs(title="Top 5 Events Tracked by Percentage",x="Path",y="# of times events Clicked by Percentage")+scale_y_continuous(labels = percent_format())
#Scaling to events to 100% and then retrieving individual event percentages
top_5_tracks<-sqldf("SELECT b.sum as total_per_path,a.count as track_count,a.event,a.path as path,a.ranks as ranks FROM (Select count as count, event as event,path as path, ranks as ranks from top_5_tracks) as a, (Select Sum(count) as Sum,path from top_5_tracks group by path ) as b where a.path=b.path ")
top_5_tracks$percentage <-paste(round((top_5_tracks$track_count/top_5_tracks$total_per_path)*100,1),"%")
|
/A1- Raw Tracks and Pages Data .r
|
no_license
|
cdaniel7/R
|
R
| false
| false
| 2,969
|
r
|
# Loading A1-1_pages.csv
pages_dataset<-read.csv("~/Desktop/Brandless/brandless_take_home_exercise_data/A1-1_pages.csv",header =TRUE)
# Top 10 Visited pages
top_10_visited_pages<-sqldf("SELECT count(path) as Count,Path from pages_dataset group by 2 order by 1 desc limit 10")
# Re-Ordering
# Fix the bar order on the x-axis: factor levels pin the ordering ggplot uses
# (most-visited page first).
top_10_visited_pages$path<-factor(top_10_visited_pages$path,levels = c("/","/category/food","/shop_all","/category/home-and-office","/category/beauty","/category/personal-care","/category/household-supplies","/category/health","/about","/checkout/email"))
# Top 10 pages visited in a ggplot
ggplot(top_10_visited_pages,aes(x=path,y=Count,label=Count))+labs(title="Top 10 pages visited",x="Path",y="# of times visited")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+geom_bar(stat="identity", width = 0.5, aes(fill=path))+geom_text(hjust=0.09,angle=45)+theme(text=element_text(size=10, family="Comic Sans MS"))
# Loading A1-1_tracks.csv
# NOTE(review): absolute ~/Desktop path ties this script to one machine.
tracks_dataset<-read.csv("~/Desktop/Brandless/brandless_take_home_exercise_data/A1-1_tracks.csv",header =TRUE)
# Joining Tracks and Pages
# Count each tracked event per page, restricted to the top-10 pages (sqldf).
tracks_and_pages_joined<-sqldf("SELECT count(a.event) as count, a.event as event, b.path FROM tracks_dataset a INNER JOIN top_10_visited_pages b ON a.context_page_path=b.path GROUP BY 2,3 order by count desc,path")
# Grouping and Ranking
# NOTE(review): order() returns a permutation, not ranks; this only acts as a
# rank here because the join above already sorted rows by count desc within
# each path -- rank()/row_number() would be safer.  Confirm before reuse.
tracks_and_pages_Rank<-tracks_and_pages_joined %>% group_by(path) %>% mutate(ranks=order(count,decreasing = TRUE))
# Top 5 tracks from Top 10 most visited pages
top_5_tracks<-filter(tracks_and_pages_Rank,ranks<6)
# Re-Ordering
top_5_tracks<-top_5_tracks %>% group_by(path) %>% arrange(desc(count),.by_group = TRUE)
# Visualizing Top 5 Events in Top 10 pages Visited
ggplot(top_5_tracks,aes(x=path,y=count,fill=event))+geom_bar(stat = "identity",position="fill")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+theme(text=element_text(size=10, family="Comic Sans MS"))+scale_fill_brewer(palette="Spectral")+labs(title="Top 5 Events Tracked",x="Path",y="# of times events Clicked")
# Visualizing Top 5 Events in Top 10 pages Visited by Percentage
ggplot(top_5_tracks,aes(x=path,y=count,fill=event))+geom_bar(stat = "identity",position="fill")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+theme(text=element_text(size=10, family="Comic Sans MS"))+scale_fill_brewer(palette="Spectral")+labs(title="Top 5 Events Tracked by Percentage",x="Path",y="# of times events Clicked by Percentage")+scale_y_continuous(labels = percent_format())
#Scaling to events to 100% and then retrieving individual event percentages
# Self-join computes each page's total event count, then each event's share.
top_5_tracks<-sqldf("SELECT b.sum as total_per_path,a.count as track_count,a.event,a.path as path,a.ranks as ranks FROM (Select count as count, event as event,path as path, ranks as ranks from top_5_tracks) as a, (Select Sum(count) as Sum,path from top_5_tracks group by path ) as b where a.path=b.path ")
top_5_tracks$percentage <-paste(round((top_5_tracks$track_count/top_5_tracks$total_per_path)*100,1),"%")
|
#' @title Run Fisher's Method to combine p values
#'
#' @description Fisher's Method combines p values from different statistical models.
#'
#' @param GOde Data frame of GO results with key columns Term, Ont, N and a
#'   P.DE p-value column (e.g. differential expression).
#' @param GOdm Data frame with the same key columns and a P.DE p-value column
#'   (e.g. differential methylation).
#' @return \code{GOdm} left-joined with \code{GOde} plus a
#'   \code{Combined_Pvalues} column holding the Fisher-combined p value per term.
FishersMethod <- function(GOde, GOdm) {
  # GOdm$P.DM <- GOdm$P.DE
  # GOdm$P.DE <- NULL
  GO_Combined <- dplyr::left_join(GOdm, GOde, by = c("Term", "Ont", "N"))
  # After the join the two P.DE columns are suffixed .x (GOdm) and .y (GOde).
  # A plain matrix avoids the deprecated dplyr::data_frame().
  pvals <- cbind(GO_Combined$P.DE.x, GO_Combined$P.DE.y)
  # Fisher's statistic: -2 * sum(log(p_i)) ~ chi-squared with 2k df for k
  # combined p values.  lower.tail = FALSE is numerically more accurate than
  # 1 - pchisq() when the combined p value is very small.
  GO_Combined$Combined_Pvalues <- pchisq(rowSums(-2 * log(pvals)),
                                         df = 2 * ncol(pvals),
                                         lower.tail = FALSE)
  GO_Combined
}
|
/R/FishersMethod.R
|
no_license
|
UofABioinformaticsHub/InteGRAPE
|
R
| false
| false
| 548
|
r
|
#' @title Run Fisher's Method to combine p values
#'
#' @description Fisher's Method combines p values from different statistical models.
#'
#' @param GOde Data frame of GO results with key columns Term, Ont, N and a
#'   P.DE p-value column (e.g. differential expression).
#' @param GOdm Data frame with the same key columns and a P.DE p-value column
#'   (e.g. differential methylation).
#' @return \code{GOdm} left-joined with \code{GOde} plus a
#'   \code{Combined_Pvalues} column holding the Fisher-combined p value per term.
FishersMethod <- function(GOde, GOdm) {
  # GOdm$P.DM <- GOdm$P.DE
  # GOdm$P.DE <- NULL
  GO_Combined <- dplyr::left_join(GOdm, GOde, by = c("Term", "Ont", "N"))
  # After the join the two P.DE columns are suffixed .x (GOdm) and .y (GOde).
  # A plain matrix avoids the deprecated dplyr::data_frame().
  pvals <- cbind(GO_Combined$P.DE.x, GO_Combined$P.DE.y)
  # Fisher's statistic: -2 * sum(log(p_i)) ~ chi-squared with 2k df for k
  # combined p values.  lower.tail = FALSE is numerically more accurate than
  # 1 - pchisq() when the combined p value is very small.
  GO_Combined$Combined_Pvalues <- pchisq(rowSums(-2 * log(pvals)),
                                         df = 2 * ncol(pvals),
                                         lower.tail = FALSE)
  GO_Combined
}
|
library("plyr")
library("stringr")
# NOTE(review): setwd() in a script ties it to one machine/user; prefer
# running from the project root with relative paths.
setwd("~/OneDrive/alignmentbench/")
# Presumably provides `human_map`, the transcript/gene ID mapping used by the
# get* helpers below -- confirm the .rda contents.
load("supportfiles/gene_mapping.rda")
# Read a kallisto abundance table for one SRA run and aggregate the estimated
# transcript counts (column 4) to gene level.
#   sra:     SRA run accession; quant/kallisto/<sra>/abundance.tsv must exist
#            relative to the working directory.
#   mapping: table with unversioned transcript IDs in column 1 and the
#            matching gene identifiers in column 2.
# Returns a named numeric vector: rounded count sums per gene.
getKallisto <- function(sra, mapping){
    file = paste0("quant/kallisto/",sra,"/abundance.tsv")
    input = read.table(file, sep="\t", stringsAsFactors=F, header=T)[,c(1,4)]
    # Strip the trailing ".<version>" from each transcript ID.  Versions can
    # have two or more digits (e.g. ".10"), so match [0-9]+ rather than the
    # single digit the original pattern allowed -- otherwise those IDs never
    # match `mapping` and their counts are silently dropped.
    input[,1] = gsub("\\.[0-9]+$","", input[,1])
    gene_match = match(input[,1], mapping[,1])
    transcript_counts = data.frame(mapping[gene_match,2], round(as.numeric(input[,2])))
    colnames(transcript_counts) = c("gene", "value")
    # Sum over all transcripts of each gene (plyr::ddply, attached at the top).
    dd = ddply(transcript_counts,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
    return(ge)
}
# Read a salmon quant.sf table for one SRA run and aggregate the estimated
# transcript counts (column 5) to gene level.
#   sra:     SRA run accession; quant/salmon/<sra>/quant.sf must exist
#            relative to the working directory.
#   mapping: table with unversioned transcript IDs in column 1 and the
#            matching gene identifiers in column 2.
# Returns a named numeric vector: rounded count sums per gene.
getSalmon <- function(sra, mapping){
    file = paste0("quant/salmon/",sra,"/quant.sf")
    input = read.table(file, sep="\t", stringsAsFactors=F, header=T)[,c(1,5)]
    # Strip the trailing ".<version>" from each transcript ID.  Versions can
    # have two or more digits (e.g. ".10"), so match [0-9]+ rather than the
    # single digit the original pattern allowed -- otherwise those IDs never
    # match `mapping` and their counts are silently dropped.
    input[,1] = gsub("\\.[0-9]+$","", input[,1])
    gene_match = match(input[,1], mapping[,1])
    transcript_counts = data.frame(mapping[gene_match,2], round(as.numeric(input[,2])))
    colnames(transcript_counts) = c("gene", "value")
    # Sum over all transcripts of each gene (plyr::ddply, attached at the top).
    dd = ddply(transcript_counts,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
    return(ge)
}
# Load a HISAT2 per-gene count table for one SRA run and return the counts
# (file column 7) as a vector named by the gene identifiers in `mapping`
# column 2, keeping only rows whose ID appears in `mapping` column 3.
getHISAT2 <- function(sra, mapping){
    count_path <- file.path("quant", "hisat2", sra, paste0(sra, ".tsv"))
    counts <- read.table(count_path, sep = "\t", stringsAsFactors = FALSE, header = TRUE)[, c(1, 7)]
    idx <- match(counts[, 1], mapping[, 3])
    keep <- !is.na(idx)
    result <- counts[keep, 2]
    names(result) <- mapping[idx[keep], 2]
    return(result)
}
# Load a STAR ReadsPerGene table for one SRA run (skipping the four summary
# rows at the top) and return the second-column counts as a vector named by
# the gene identifiers in `mapping` column 2, keeping only rows whose ID
# appears in `mapping` column 3.
getSTAR <- function(sra, mapping){
    count_path <- file.path("quant", "star", sra, paste0(sra, "ReadsPerGene.out.tab"))
    counts <- read.table(count_path, sep = "\t", stringsAsFactors = FALSE, skip = 4)[, c(1, 2)]
    idx <- match(counts[, 1], mapping[, 3])
    keep <- !is.na(idx)
    result <- counts[keep, 2]
    names(result) <- mapping[idx[keep], 2]
    return(result)
}
# Load gene-level counts from each quantifier.
# NOTE(review): each tool is pointed at a DIFFERENT SRA run
# (SRR827478 / SRR886587 / SRR901183) -- confirm this is intentional, since
# the scatterplot below then compares tools across different samples.
gene_count_kallisto = getKallisto("SRR827478", human_map)
gene_count_salmon = getSalmon("SRR886587", human_map)
gene_count_hisat2 = getHISAT2("SRR901183", human_map)
gene_count_star = getSTAR("SRR827478", human_map)
# Restrict all four vectors to the genes they have in common.
inter1 = intersect(names(gene_count_kallisto), names(gene_count_salmon))
inter2 = intersect(names(gene_count_hisat2), names(gene_count_star))
inter = sort(intersect(inter1, inter2))
kallisto = gene_count_kallisto[inter]
salmon = gene_count_salmon[inter]
hisat2 = gene_count_hisat2[inter]
star = gene_count_star[inter]
expression = do.call(cbind, list(kallisto, salmon, hisat2, star))
colnames(expression) = c("kallisto","Salmon","HISAT2","STAR")
# Drop gene IDs that still carry a ".<digit>" suffix.
# NOTE(review): if grep() matches nothing, `expression[-gg,]` with an empty
# index drops ALL rows -- guard with length(gg) > 0 if that case can occur.
gg = grep("A.*\\.[0-9]$", rownames(expression))
expression = expression[-gg,]
# Pairwise log2 scatterplots of 2000 randomly sampled genes.
# NOTE(review): scatterplotMatrix() is from the `car` package, which is not
# attached anywhere in this file -- confirm it is loaded elsewhere.
scatterplotMatrix(log2(expression[sample(1:nrow(expression), 2000),]+1), pch=".",
                  regLine = list(method=lm, lty=1, lwd=2, col="red"),
                  col="black")
# NOTE(review): this function appears unfinished/broken and should not be
# called as-is:
#   * the `abundance` argument is never used;
#   * `abu` and `cb` are referenced but defined nowhere in this file;
#   * `kallisto`/`salmon`/`inter`/`sdd` are computed and then never used;
#   * the final statement is an assignment, so nothing meaningful is
#     returned.
# Kept verbatim for reference only.
genequant<-function(abundance){
    #args <- commandArgs(TRUE)
    #mapping = args[1]
    #res = load(mapping)
    kf = "quant/kallisto/SRR1002568/abundance.tsv"
    sf = "quant/salmon/SRR1002568/quant.sf"
    kallisto = read.table(kf, sep="\t", stringsAsFactors=F, header=T)
    salmon = read.table(sf, sep="\t", stringsAsFactors=F, header=T)
    salmon = salmon[,c(1,2,3,5,4)]
    inter = intersect(kallisto[,1], salmon[,1])
    sdd = setdiff(kallisto[,1], salmon[,1])
    # `cb` and `abu` are undefined in this file -- this will error at runtime.
    ugene = cb[,2]
    m3 = match(abu[,1], cb[,1])
    cco = cbind(abu,ugene[m3])[-1,]
    co = cco[,c(6,4)]
    co[,1] = as.character(co[,1])
    df = data.frame(co[,1], as.numeric(co[,2]))
    colnames(df) = c("gene", "value")
    dd = ddply(df,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
}
# ---- Aligner runtime barplots --------------------------------------------
# Each section reads a timing table (rows = aligners, columns = thread
# counts), applies a hard-coded display order, and writes a grouped barplot
# to a PDF under figures/.
# NOTE(review): every `oo = order(rowSums(times))` below is dead code -- the
# very next line overwrites `oo` with a fixed ordering.
library(RColorBrewer)
darkcols <- brewer.pal(8, "Set1")
# SRR7460337, all five aligners.
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
pdf("figures/speed_align_SRR7460337.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# SRR7460337 with row 5 dropped (bwa, per the output filename).
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
times = times[-5,]
oo = c(2,1,4,3)
pdf("figures/speed_align_SRR7460337_nobwa.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
# SRR2972202, all five aligners.
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
pdf("figures/speed_align_SRR2972202.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# SRR2972202 with row 5 dropped.
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
times = times[-5,]
oo = c(2,1,4,3)
pdf("figures/speed_align_SRR2972202_nobwa.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
# Two-panel figure: both samples side by side, all five aligners.
pdf("figures/speed_align_SRR7460337_SRR2972202.pdf", 14, 8)
par(mfrow=c(1,2))
par(mar=c(5,5,4,1))
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# Two-panel figure without row 5.
# NOTE(review): unlike the single-panel "nobwa" plots, `times = times[-5,]`
# is not applied here; row 5 is excluded only via the hard-coded `oo`.
pdf("figures/speed_align_SRR7460337_SRR2972202_nobwa.pdf", 14, 8)
par(mfrow=c(1,2))
par(mar=c(5,5,4,1))
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
|
/scripts/read_quantification.r
|
no_license
|
lachmann12/alignmentbenchmark
|
R
| false
| false
| 7,763
|
r
|
library("plyr")
library("stringr")
# NOTE(review): setwd() in a script ties it to one machine/user; prefer
# running from the project root with relative paths.
setwd("~/OneDrive/alignmentbench/")
# Presumably provides `human_map`, the transcript/gene ID mapping used by the
# get* helpers below -- confirm the .rda contents.
load("supportfiles/gene_mapping.rda")
# Read a kallisto abundance table for one SRA run and aggregate the estimated
# transcript counts (column 4) to gene level.
#   sra:     SRA run accession; quant/kallisto/<sra>/abundance.tsv must exist
#            relative to the working directory.
#   mapping: table with unversioned transcript IDs in column 1 and the
#            matching gene identifiers in column 2.
# Returns a named numeric vector: rounded count sums per gene.
getKallisto <- function(sra, mapping){
    file = paste0("quant/kallisto/",sra,"/abundance.tsv")
    input = read.table(file, sep="\t", stringsAsFactors=F, header=T)[,c(1,4)]
    # Strip the trailing ".<version>" from each transcript ID.  Versions can
    # have two or more digits (e.g. ".10"), so match [0-9]+ rather than the
    # single digit the original pattern allowed -- otherwise those IDs never
    # match `mapping` and their counts are silently dropped.
    input[,1] = gsub("\\.[0-9]+$","", input[,1])
    gene_match = match(input[,1], mapping[,1])
    transcript_counts = data.frame(mapping[gene_match,2], round(as.numeric(input[,2])))
    colnames(transcript_counts) = c("gene", "value")
    # Sum over all transcripts of each gene (plyr::ddply, attached at the top).
    dd = ddply(transcript_counts,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
    return(ge)
}
# Read a salmon quant.sf table for one SRA run and aggregate the estimated
# transcript counts (column 5) to gene level.
#   sra:     SRA run accession; quant/salmon/<sra>/quant.sf must exist
#            relative to the working directory.
#   mapping: table with unversioned transcript IDs in column 1 and the
#            matching gene identifiers in column 2.
# Returns a named numeric vector: rounded count sums per gene.
getSalmon <- function(sra, mapping){
    file = paste0("quant/salmon/",sra,"/quant.sf")
    input = read.table(file, sep="\t", stringsAsFactors=F, header=T)[,c(1,5)]
    # Strip the trailing ".<version>" from each transcript ID.  Versions can
    # have two or more digits (e.g. ".10"), so match [0-9]+ rather than the
    # single digit the original pattern allowed -- otherwise those IDs never
    # match `mapping` and their counts are silently dropped.
    input[,1] = gsub("\\.[0-9]+$","", input[,1])
    gene_match = match(input[,1], mapping[,1])
    transcript_counts = data.frame(mapping[gene_match,2], round(as.numeric(input[,2])))
    colnames(transcript_counts) = c("gene", "value")
    # Sum over all transcripts of each gene (plyr::ddply, attached at the top).
    dd = ddply(transcript_counts,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
    return(ge)
}
# Load a HISAT2 per-gene count table for one SRA run and return the counts
# (file column 7) as a vector named by the gene identifiers in `mapping`
# column 2, keeping only rows whose ID appears in `mapping` column 3.
getHISAT2 <- function(sra, mapping){
    count_path <- file.path("quant", "hisat2", sra, paste0(sra, ".tsv"))
    counts <- read.table(count_path, sep = "\t", stringsAsFactors = FALSE, header = TRUE)[, c(1, 7)]
    idx <- match(counts[, 1], mapping[, 3])
    keep <- !is.na(idx)
    result <- counts[keep, 2]
    names(result) <- mapping[idx[keep], 2]
    return(result)
}
# Load a STAR ReadsPerGene table for one SRA run (skipping the four summary
# rows at the top) and return the second-column counts as a vector named by
# the gene identifiers in `mapping` column 2, keeping only rows whose ID
# appears in `mapping` column 3.
getSTAR <- function(sra, mapping){
    count_path <- file.path("quant", "star", sra, paste0(sra, "ReadsPerGene.out.tab"))
    counts <- read.table(count_path, sep = "\t", stringsAsFactors = FALSE, skip = 4)[, c(1, 2)]
    idx <- match(counts[, 1], mapping[, 3])
    keep <- !is.na(idx)
    result <- counts[keep, 2]
    names(result) <- mapping[idx[keep], 2]
    return(result)
}
# Load gene-level counts from each quantifier.
# NOTE(review): each tool is pointed at a DIFFERENT SRA run
# (SRR827478 / SRR886587 / SRR901183) -- confirm this is intentional, since
# the scatterplot below then compares tools across different samples.
gene_count_kallisto = getKallisto("SRR827478", human_map)
gene_count_salmon = getSalmon("SRR886587", human_map)
gene_count_hisat2 = getHISAT2("SRR901183", human_map)
gene_count_star = getSTAR("SRR827478", human_map)
# Restrict all four vectors to the genes they have in common.
inter1 = intersect(names(gene_count_kallisto), names(gene_count_salmon))
inter2 = intersect(names(gene_count_hisat2), names(gene_count_star))
inter = sort(intersect(inter1, inter2))
kallisto = gene_count_kallisto[inter]
salmon = gene_count_salmon[inter]
hisat2 = gene_count_hisat2[inter]
star = gene_count_star[inter]
expression = do.call(cbind, list(kallisto, salmon, hisat2, star))
colnames(expression) = c("kallisto","Salmon","HISAT2","STAR")
# Drop gene IDs that still carry a ".<digit>" suffix.
# NOTE(review): if grep() matches nothing, `expression[-gg,]` with an empty
# index drops ALL rows -- guard with length(gg) > 0 if that case can occur.
gg = grep("A.*\\.[0-9]$", rownames(expression))
expression = expression[-gg,]
# Pairwise log2 scatterplots of 2000 randomly sampled genes.
# NOTE(review): scatterplotMatrix() is from the `car` package, which is not
# attached anywhere in this file -- confirm it is loaded elsewhere.
scatterplotMatrix(log2(expression[sample(1:nrow(expression), 2000),]+1), pch=".",
                  regLine = list(method=lm, lty=1, lwd=2, col="red"),
                  col="black")
# NOTE(review): this function appears unfinished/broken and should not be
# called as-is:
#   * the `abundance` argument is never used;
#   * `abu` and `cb` are referenced but defined nowhere in this file;
#   * `kallisto`/`salmon`/`inter`/`sdd` are computed and then never used;
#   * the final statement is an assignment, so nothing meaningful is
#     returned.
# Kept verbatim for reference only.
genequant<-function(abundance){
    #args <- commandArgs(TRUE)
    #mapping = args[1]
    #res = load(mapping)
    kf = "quant/kallisto/SRR1002568/abundance.tsv"
    sf = "quant/salmon/SRR1002568/quant.sf"
    kallisto = read.table(kf, sep="\t", stringsAsFactors=F, header=T)
    salmon = read.table(sf, sep="\t", stringsAsFactors=F, header=T)
    salmon = salmon[,c(1,2,3,5,4)]
    inter = intersect(kallisto[,1], salmon[,1])
    sdd = setdiff(kallisto[,1], salmon[,1])
    # `cb` and `abu` are undefined in this file -- this will error at runtime.
    ugene = cb[,2]
    m3 = match(abu[,1], cb[,1])
    cco = cbind(abu,ugene[m3])[-1,]
    co = cco[,c(6,4)]
    co[,1] = as.character(co[,1])
    df = data.frame(co[,1], as.numeric(co[,2]))
    colnames(df) = c("gene", "value")
    dd = ddply(df,.(gene),summarize,sum=sum(value),number=length(gene))
    ge = dd[,2]
    names(ge) = dd[,1]
}
# ---- Aligner runtime barplots --------------------------------------------
# Each section reads a timing table (rows = aligners, columns = thread
# counts), applies a hard-coded display order, and writes a grouped barplot
# to a PDF under figures/.
# NOTE(review): every `oo = order(rowSums(times))` below is dead code -- the
# very next line overwrites `oo` with a fixed ordering.
library(RColorBrewer)
darkcols <- brewer.pal(8, "Set1")
# SRR7460337, all five aligners.
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
pdf("figures/speed_align_SRR7460337.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# SRR7460337 with row 5 dropped (bwa, per the output filename).
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
times = times[-5,]
oo = c(2,1,4,3)
pdf("figures/speed_align_SRR7460337_nobwa.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
# SRR2972202, all five aligners.
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
pdf("figures/speed_align_SRR2972202.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# SRR2972202 with row 5 dropped.
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
times = times[-5,]
oo = c(2,1,4,3)
pdf("figures/speed_align_SRR2972202_nobwa.pdf")
par(mar=c(6,6,6,6))
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=1.8, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
# Two-panel figure: both samples side by side, all five aligners.
pdf("figures/speed_align_SRR7460337_SRR2972202.pdf", 14, 8)
par(mfrow=c(1,2))
par(mar=c(5,5,4,1))
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3,5)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:5], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:5], bty = "n", cex=2)
dev.off()
# Two-panel figure without row 5.
# NOTE(review): unlike the single-panel "nobwa" plots, `times = times[-5,]`
# is not applied here; row 5 is excluded only via the hard-coded `oo`.
pdf("figures/speed_align_SRR7460337_SRR2972202_nobwa.pdf", 14, 8)
par(mfrow=c(1,2))
par(mar=c(5,5,4,1))
times = read.table("supportfiles/SRR7460337_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
times = read.table("supportfiles/SRR2972202_time.tsv", sep="\t", stringsAsFactors=F)
rownames(times) = times[,1]
times = times[,-1]
colnames(times) = c(1,2,3,4,6,8,12,16)
oo = order(rowSums(times))
oo = c(2,1,4,3)
barplot(as.matrix(times[oo,]), beside=TRUE, col=darkcols[1:4], xlab="threads", ylab="seconds", cex.lab=2, cex.axis=2, cex.names=1.8)
legend("topright",legend=rownames(times)[oo], fill=darkcols[1:4], bty = "n", cex=2)
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/zeroenv.R
\name{search_path_trim}
\alias{search_path_trim}
\title{Restore Search Path to Bare Bones R Default}
\usage{
search_path_trim(keep = c("package:unitizer", "tools:rstudio"))
}
\arguments{
\item{keep}{character names of packages/objects to keep in search path;
note that base packages (see .unitizer.base.packages) that come typically
pre attached are always kept. The \code{`keep`} packages are an addition
to those.}
}
\value{
invisibly TRUE on success, FALSE on failure
}
\description{
\code{`search_path_trim`} attempts to recreate a clean environment by
unloading all packages and objects that are not loaded by default in the
default R configuration.
}
\details{
Note this does not unload namespaces, but rather just detaches the
corresponding packages from the search path.
\code{`tools:rstudio`} is kept in search path as the default argument because
it isn't possible to cleanly unload and reload it because \code{`attach`}
actually attaches a copy of it's argument, not the actual object, and that
causes problems for that search path item.
}
\seealso{
\code{`\link{search_path_restore}`} \code{`\link{search}`}
}
\keyword{internal}
|
/man/search_path_trim.Rd
|
no_license
|
loerasg/unitizer
|
R
| false
| false
| 1,218
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/zeroenv.R
\name{search_path_trim}
\alias{search_path_trim}
\title{Restore Search Path to Bare Bones R Default}
\usage{
search_path_trim(keep = c("package:unitizer", "tools:rstudio"))
}
\arguments{
\item{keep}{character names of packages/objects to keep in search path;
note that base packages (see .unitizer.base.packages) that come typically
pre attached are always kept. The \code{`keep`} packages are an addition
to those.}
}
\value{
invisibly TRUE on success, FALSE on failure
}
\description{
\code{`search_path_trim`} attempts to recreate a clean environment by
unloading all packages and objects that are not loaded by default in the
default R configuration.
}
\details{
Note this does not unload namespaces, but rather just detaches the
corresponding packages from the search path.
\code{`tools:rstudio`} is kept in search path as the default argument because
it isn't possible to cleanly unload and reload it because \code{`attach`}
actually attaches a copy of it's argument, not the actual object, and that
causes problems for that search path item.
}
\seealso{
\code{`\link{search_path_restore}`} \code{`\link{search}`}
}
\keyword{internal}
|
#=====================================================================
# R_packages_UCL_5
#
# Install 5th batch of add on R packages for UCL R installations.
#
# June 2016
# Latest update October 2017
#
# NOTE(review): "shiny" is requested twice in the June 2014 block below;
# the repeat is redundant but harmless.
# Library paths and the repository URL come from the environment; the bare
# variable names echo their values into the installation log.
mainLib <- Sys.getenv ("RLIB_MAIN");
dbLib <- Sys.getenv ("RLIB_DB");
repros <- Sys.getenv ("REPROS");
mainLib;
dbLib;
repros;
#
# For Antonia Ford (a.ford.11@ucl.ac.uk) - added Jan 2014
install.packages ("rgl", lib=mainLib, repos=repros);
install.packages ("fastmatch", lib=mainLib, repos=repros);
install.packages ("phangorn", lib=mainLib, repos=repros);
# For JAGS - added March 2014
install.packages ("coda", lib=mainLib, repos=repros);
install.packages ("rjags", lib=mainLib, repos=repros);
install.packages ("abind", lib=mainLib, repos=repros);
install.packages ("R2WinBUGS", lib=mainLib, repos=repros);
install.packages ("R2jags", lib=mainLib, repos=repros);
# For Stuart Peters (stuart.peters.13@ucl.ac.uk) June 2014
install.packages ("httpuv", lib=mainLib, repos=repros);
install.packages ("RJSONIO", lib=mainLib, repos=repros);
install.packages ("digest", lib=mainLib, repos=repros);
install.packages ("htmltools", lib=mainLib, repos=repros);
install.packages ("caTools", lib=mainLib, repos=repros);
install.packages ("shiny", lib=mainLib, repos=repros);
install.packages ("ade4", lib=mainLib, repos=repros);
install.packages ("ape", lib=mainLib, repos=repros);
install.packages ("plyr", lib=mainLib, repos=repros);
install.packages ("labeling", lib=mainLib, repos=repros);
install.packages ("dichromat", lib=mainLib, repos=repros);
install.packages ("colorspace", lib=mainLib, repos=repros);
install.packages ("munsell", lib=mainLib, repos=repros);
install.packages ("scales", lib=mainLib, repos=repros);
install.packages ("shiny", lib=mainLib, repos=repros);
install.packages ("gtable", lib=mainLib, repos=repros);
install.packages ("stringr", lib=mainLib, repos=repros);
install.packages ("reshape2", lib=mainLib, repos=repros);
install.packages ("proto", lib=mainLib, repos=repros);
install.packages ("ggplot2", lib=mainLib, repos=repros);
install.packages ("adegenet", lib=mainLib, repos=repros);
install.packages ("pegas", lib=mainLib, repos=repros);
install.packages ("stringdist", lib=mainLib, repos=repros);
# For Rodrigo Targino (r.targino.12@ucl.ac.uk) July 2014
install.packages ("HAC", lib=mainLib, repos=repros);
# For Mattia Cinelli (rebmmci@ucl.ac.uk) Nov 2014
install.packages ("e1071", lib=mainLib, repos=repros);
# Extra R packages needed for various Bioconductor packages.
install.packages ("gplots", lib=mainLib, repos=repros);
install.packages ("checkmate", lib=mainLib, repos=repros);
install.packages ("BBmisc", lib=mainLib, repos=repros);
install.packages ("base64enc", lib=mainLib, repos=repros);
install.packages ("sendmailR", lib=mainLib, repos=repros);
install.packages ("brew", lib=mainLib, repos=repros);
install.packages ("fail", lib=mainLib, repos=repros);
install.packages ("BatchJobs", lib=mainLib, repos=repros);
install.packages ("RMySQL", lib=mainLib, repos=repros);
install.packages ("R.methodsS3", lib=mainLib, repos=repros);
install.packages ("matrixStats", lib=mainLib, repos=repros);
install.packages ("base64", lib=mainLib, repos=repros);
install.packages ("gsmoothr", lib=mainLib, repos=repros);
install.packages ("R.cache", lib=mainLib, repos=repros);
install.packages ("R.filesets", lib=mainLib, repos=repros);
install.packages ("R.devices", lib=mainLib, repos=repros);
install.packages ("R.rsp", lib=mainLib, repos=repros);
install.packages ("PSCBS", lib=mainLib, repos=repros);
install.packages ("aroma.core", lib=mainLib, repos=repros);
install.packages ("R.huge", lib=mainLib, repos=repros);
install.packages ("truncnorm", lib=mainLib, repos=repros);
install.packages ("Rsolnp", lib=mainLib, repos=repros);
install.packages ("intervals", lib=mainLib, repos=repros);
install.packages ("colorRamps", lib=mainLib, repos=repros);
install.packages ("schoolmath", lib=mainLib, repos=repros);
install.packages ("LSD", lib=mainLib, repos=repros);
install.packages ("RcppArmadillo", lib=mainLib, repos=repros);
# For Matthew Jones (m.jones.12@ucl.ac.uk) June 2016 new in R 3.3.0
install.packages ("rstanarm", lib=mainLib, repos=repros);
# Required for the cancerit suite (https://github.com/cancerit)
install.packages ("gam", lib=mainLib, repos=repros);
install.packages ("VGAM", lib=mainLib, repos=repros);
install.packages ("poweRlaw", lib=mainLib, repos=repros);
# For Zahra Sabetsarvestani (ucakzsa@ucl.ac.uk) Aug 2016
install.packages ("mlr", lib=mainLib, repos=repros);
install.packages ("pracma", lib=mainLib, repos=repros);
install.packages ("softImpute", lib=mainLib, repos=repros);
install.packages ("caret", lib=mainLib, repos=repros);
install.packages ("quantreg", lib=mainLib, repos=repros);
install.packages ("randomForest", lib=mainLib, repos=repros);
# For Slava Mikhaylov from political science Sep 2016
install.packages ("relaimpo", lib=mainLib, repos=repros);
install.packages ("GGally", lib=mainLib, repos=repros);
install.packages ("effects", lib=mainLib, repos=repros);
install.packages ("HotDeckImputation", lib=mainLib, repos=repros);
install.packages ("psych", lib=mainLib, repos=repros);
# For use with snow examples
install.packages ("rlecuyer", lib=mainLib, repos=repros);
# More requests from Political Science
install.packages ("rgdal", lib=mainLib, repos=repros);
install.packages ("rgeos", lib=mainLib, repos=repros);
install.packages ("erer", lib=mainLib, repos=repros);
install.packages ("panelAR", lib=mainLib, repos=repros);
install.packages ("arm", lib=mainLib, repos=repros);
install.packages ("systemfit", lib=mainLib, repos=repros);
# tmap requested by James Cheshire, Geography
# install V8's dependencies first, otherwise v8conf variables get lost in the interim
install.packages("Rcpp", lib=mainLib, repos=repros);
install.packages("jsonlite", lib=mainLib, repos=repros);
install.packages("curl", lib=mainLib, repos=repros);
# Non-standard include/lib locations for the locally built V8 and UDUNITS.
v8conf <- 'INCLUDE_DIR=/shared/ucl/apps/v8/3.15/v8/include LIB_DIR=/shared/ucl/apps/v8/3.15/v8/out/x64.release/lib.target';
install.packages ("V8", lib=mainLib, repos=repros, configure.vars=v8conf);
udunits2Conf <- '--with-udunits2-include=/shared/ucl/apps/UDUNITS/2.2.20-gnu-4.9.2/include --with-udunits2-lib=/shared/ucl/apps/UDUNITS/2.2.20-gnu-4.9.2/lib';
install.packages ("udunits2", lib=mainLib, repos=repros, configure.args=udunits2Conf);
install.packages ("tmap", lib=mainLib, repos=repros);
# For Lucia Conde (l.conde@ucl.ac.uk) May 2017
install.packages ("rmarkdown", lib=mainLib, repos=repros);
# For RStudio server Oct 2017
install.packages ("tidyverse", lib=mainLib, repos=repros);
# For Cheng Zhang (cheng.zhang@ucl.ac.uk) March 2018
install.packages ("bio3d", lib=mainLib, repos=repros);
# For Cheng Zhang (cheng.zhang@ucl.ac.uk) May 2018
install.packages ("png", lib=mainLib, repos=repros);
# End of R_packages_UCL_5
# install.packages ("XXX", lib=mainLib, repos=repros);
|
/files/R_UCL/R_packages_UCL_5.R
|
permissive
|
gh3orghiu/rcps-buildscripts
|
R
| false
| false
| 6,940
|
r
|
#=====================================================================
# R_packages_UCL_5
#
# Install 5th batch of add on R packages for UCL R installations.
#
# June 2016
# Latest update October 2017
#
# NOTE(review): "shiny" is requested twice in the June 2014 block below;
# the repeat is redundant but harmless.
# Library paths and the repository URL come from the environment; the bare
# variable names echo their values into the installation log.
mainLib <- Sys.getenv ("RLIB_MAIN");
dbLib <- Sys.getenv ("RLIB_DB");
repros <- Sys.getenv ("REPROS");
mainLib;
dbLib;
repros;
#
# For Antonia Ford (a.ford.11@ucl.ac.uk) - added Jan 2014
install.packages ("rgl", lib=mainLib, repos=repros);
install.packages ("fastmatch", lib=mainLib, repos=repros);
install.packages ("phangorn", lib=mainLib, repos=repros);
# For JAGS - added March 2014
install.packages ("coda", lib=mainLib, repos=repros);
install.packages ("rjags", lib=mainLib, repos=repros);
install.packages ("abind", lib=mainLib, repos=repros);
install.packages ("R2WinBUGS", lib=mainLib, repos=repros);
install.packages ("R2jags", lib=mainLib, repos=repros);
# For Stuart Peters (stuart.peters.13@ucl.ac.uk) June 2014
install.packages ("httpuv", lib=mainLib, repos=repros);
install.packages ("RJSONIO", lib=mainLib, repos=repros);
install.packages ("digest", lib=mainLib, repos=repros);
install.packages ("htmltools", lib=mainLib, repos=repros);
install.packages ("caTools", lib=mainLib, repos=repros);
install.packages ("shiny", lib=mainLib, repos=repros);
install.packages ("ade4", lib=mainLib, repos=repros);
install.packages ("ape", lib=mainLib, repos=repros);
install.packages ("plyr", lib=mainLib, repos=repros);
install.packages ("labeling", lib=mainLib, repos=repros);
install.packages ("dichromat", lib=mainLib, repos=repros);
install.packages ("colorspace", lib=mainLib, repos=repros);
install.packages ("munsell", lib=mainLib, repos=repros);
install.packages ("scales", lib=mainLib, repos=repros);
install.packages ("shiny", lib=mainLib, repos=repros);
install.packages ("gtable", lib=mainLib, repos=repros);
install.packages ("stringr", lib=mainLib, repos=repros);
install.packages ("reshape2", lib=mainLib, repos=repros);
install.packages ("proto", lib=mainLib, repos=repros);
install.packages ("ggplot2", lib=mainLib, repos=repros);
install.packages ("adegenet", lib=mainLib, repos=repros);
install.packages ("pegas", lib=mainLib, repos=repros);
install.packages ("stringdist", lib=mainLib, repos=repros);
# For Rodrigo Targino (r.targino.12@ucl.ac.uk) July 2014
install.packages ("HAC", lib=mainLib, repos=repros);
# For Mattia Cinelli (rebmmci@ucl.ac.uk) Nov 2014
install.packages ("e1071", lib=mainLib, repos=repros);
# Extra R packages needed for various Bioconductor packages.
install.packages ("gplots", lib=mainLib, repos=repros);
install.packages ("checkmate", lib=mainLib, repos=repros);
install.packages ("BBmisc", lib=mainLib, repos=repros);
install.packages ("base64enc", lib=mainLib, repos=repros);
install.packages ("sendmailR", lib=mainLib, repos=repros);
install.packages ("brew", lib=mainLib, repos=repros);
install.packages ("fail", lib=mainLib, repos=repros);
install.packages ("BatchJobs", lib=mainLib, repos=repros);
install.packages ("RMySQL", lib=mainLib, repos=repros);
install.packages ("R.methodsS3", lib=mainLib, repos=repros);
install.packages ("matrixStats", lib=mainLib, repos=repros);
install.packages ("base64", lib=mainLib, repos=repros);
install.packages ("gsmoothr", lib=mainLib, repos=repros);
install.packages ("R.cache", lib=mainLib, repos=repros);
install.packages ("R.filesets", lib=mainLib, repos=repros);
install.packages ("R.devices", lib=mainLib, repos=repros);
install.packages ("R.rsp", lib=mainLib, repos=repros);
install.packages ("PSCBS", lib=mainLib, repos=repros);
install.packages ("aroma.core", lib=mainLib, repos=repros);
install.packages ("R.huge", lib=mainLib, repos=repros);
install.packages ("truncnorm", lib=mainLib, repos=repros);
install.packages ("Rsolnp", lib=mainLib, repos=repros);
install.packages ("intervals", lib=mainLib, repos=repros);
install.packages ("colorRamps", lib=mainLib, repos=repros);
install.packages ("schoolmath", lib=mainLib, repos=repros);
install.packages ("LSD", lib=mainLib, repos=repros);
install.packages ("RcppArmadillo", lib=mainLib, repos=repros);
# For Matthew Jones (m.jones.12@ucl.ac.uk) June 2016 new in R 3.3.0
install.packages ("rstanarm", lib=mainLib, repos=repros);
# Required for the cancerit suite (https://github.com/cancerit)
install.packages ("gam", lib=mainLib, repos=repros);
install.packages ("VGAM", lib=mainLib, repos=repros);
install.packages ("poweRlaw", lib=mainLib, repos=repros);
# For Zahra Sabetsarvestani (ucakzsa@ucl.ac.uk) Aug 2016
install.packages ("mlr", lib=mainLib, repos=repros);
install.packages ("pracma", lib=mainLib, repos=repros);
install.packages ("softImpute", lib=mainLib, repos=repros);
install.packages ("caret", lib=mainLib, repos=repros);
install.packages ("quantreg", lib=mainLib, repos=repros);
install.packages ("randomForest", lib=mainLib, repos=repros);
# For Slava Mikhaylov from political science Sep 2016
install.packages ("relaimpo", lib=mainLib, repos=repros);
install.packages ("GGally", lib=mainLib, repos=repros);
install.packages ("effects", lib=mainLib, repos=repros);
install.packages ("HotDeckImputation", lib=mainLib, repos=repros);
install.packages ("psych", lib=mainLib, repos=repros);
# For use with snow examples
install.packages ("rlecuyer", lib=mainLib, repos=repros);
# More requests from Political Science
install.packages ("rgdal", lib=mainLib, repos=repros);
install.packages ("rgeos", lib=mainLib, repos=repros);
install.packages ("erer", lib=mainLib, repos=repros);
install.packages ("panelAR", lib=mainLib, repos=repros);
install.packages ("arm", lib=mainLib, repos=repros);
install.packages ("systemfit", lib=mainLib, repos=repros);
# tmap requested by James Cheshire, Geography
# install V8's dependencies first, otherwise v8conf variables get lost in the interim
install.packages("Rcpp", lib=mainLib, repos=repros);
install.packages("jsonlite", lib=mainLib, repos=repros);
install.packages("curl", lib=mainLib, repos=repros);
# Non-standard include/lib locations for the locally built V8 and UDUNITS.
v8conf <- 'INCLUDE_DIR=/shared/ucl/apps/v8/3.15/v8/include LIB_DIR=/shared/ucl/apps/v8/3.15/v8/out/x64.release/lib.target';
install.packages ("V8", lib=mainLib, repos=repros, configure.vars=v8conf);
udunits2Conf <- '--with-udunits2-include=/shared/ucl/apps/UDUNITS/2.2.20-gnu-4.9.2/include --with-udunits2-lib=/shared/ucl/apps/UDUNITS/2.2.20-gnu-4.9.2/lib';
install.packages ("udunits2", lib=mainLib, repos=repros, configure.args=udunits2Conf);
install.packages ("tmap", lib=mainLib, repos=repros);
# For Lucia Conde (l.conde@ucl.ac.uk) May 2017
install.packages ("rmarkdown", lib=mainLib, repos=repros);
# For RStudio server Oct 2017
install.packages ("tidyverse", lib=mainLib, repos=repros);
# For Cheng Zhang (cheng.zhang@ucl.ac.uk) March 2018
install.packages ("bio3d", lib=mainLib, repos=repros);
# For Cheng Zhang (cheng.zhang@ucl.ac.uk) May 2018
install.packages ("png", lib=mainLib, repos=repros);
# End of R_packages_UCL_5
# install.packages ("XXX", lib=mainLib, repos=repros);
|
## Produce the fourth required plot for Exploratory Data Analysis, Project 1
## Load and clean the data
## Subset to use only dates 2007-02-01 and 2007-02-02.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# NOTE(review): the archive is re-downloaded on every run; consider skipping
# the download when the zip already exists.
download.file(url, "electricConsumption.zip")
# Read the semicolon-separated power data straight out of the zip archive.
data <- read.table(unz("electricConsumption.zip", "household_power_consumption.txt"),
                   header = TRUE, sep = ";",
                   stringsAsFactors = FALSE )
# Combine Date + Time into one timestamp for the plot x-axes.
# NOTE(review): strptime() yields POSIXlt; POSIXct is generally preferred
# inside data frames -- confirm downstream use is unaffected.
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
sampleDates <- (data$Date == as.Date("2007-02-01") | data$Date == as.Date("2007-02-02"))
data <- data[sampleDates, ]
# Columns 3:9 hold the measurements; as.numeric() converts any non-numeric
# missing-value markers to NA (with a coercion warning).
for (i in 3:9) {
    data[,i] <- as.numeric(data[,i])
}
## Construct and save Plot 4 as a PNG file
png("plot4.PNG", width = 480, height = 480)
par(mfrow = c(2,2)) # 2x2 panel layout
with(data, {
    plot(datetime, Global_active_power, "l", xlab = "",
         ylab = "Global Active Power")
    plot(datetime, Voltage, "l")
    plot(datetime, Sub_metering_1, "l", xlab = "",
         ylab = "Energy sub metering")
    lines(datetime, Sub_metering_2, "l", col = "red")
    lines(datetime, Sub_metering_3, "l", col = "blue")
    legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           bty = "n", lty = 1, col = c("black", "red", "blue"))
    plot(datetime, Global_reactive_power, "l")
})
dev.off()
|
/plot4.R
|
no_license
|
ngerew/ExData_Plotting1
|
R
| false
| false
| 1,499
|
r
|
## Produce the fourth required plot for Exploratory Data Analysis, Project 1
## Load and clean the data
## Subset to use only dates 2007-02-01 and 2007-02-02.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# NOTE(review): the archive is re-downloaded on every run; consider skipping
# the download when the zip already exists.
download.file(url, "electricConsumption.zip")
# Read the semicolon-separated power data straight out of the zip archive.
data <- read.table(unz("electricConsumption.zip", "household_power_consumption.txt"),
                   header = TRUE, sep = ";",
                   stringsAsFactors = FALSE )
# Combine Date + Time into one timestamp for the plot x-axes.
# NOTE(review): strptime() yields POSIXlt; POSIXct is generally preferred
# inside data frames -- confirm downstream use is unaffected.
data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
sampleDates <- (data$Date == as.Date("2007-02-01") | data$Date == as.Date("2007-02-02"))
data <- data[sampleDates, ]
# Columns 3:9 hold the measurements; as.numeric() converts any non-numeric
# missing-value markers to NA (with a coercion warning).
for (i in 3:9) {
    data[,i] <- as.numeric(data[,i])
}
## Construct and save Plot 4 as a PNG file
png("plot4.PNG", width = 480, height = 480)
par(mfrow = c(2,2)) # 2x2 panel layout
with(data, {
    plot(datetime, Global_active_power, "l", xlab = "",
         ylab = "Global Active Power")
    plot(datetime, Voltage, "l")
    plot(datetime, Sub_metering_1, "l", xlab = "",
         ylab = "Energy sub metering")
    lines(datetime, Sub_metering_2, "l", col = "red")
    lines(datetime, Sub_metering_3, "l", col = "blue")
    legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           bty = "n", lty = 1, col = c("black", "red", "blue"))
    plot(datetime, Global_reactive_power, "l")
})
dev.off()
|
# Mini cheat sheet: inspecting an R object's type and attributes.
var <- 1
str(var)# compactly display the internal structure/type of var
attributes(var) # list all attributes of var (NULL for a plain scalar)
attr(var, 'attributeName')# fetch one named attribute from var (NULL if absent)
as.matrix(var) # coerce a vector (values + attributes) into a matrix
|
/cheatsheet.r
|
no_license
|
LaurensRietveld/RGraphAnalysis
|
R
| false
| false
| 263
|
r
|
# Mini cheat sheet: inspecting an R object's type and attributes.
var <- 1
str(var)# compactly display the internal structure/type of var
attributes(var) # list all attributes of var (NULL for a plain scalar)
attr(var, 'attributeName')# fetch one named attribute from var (NULL if absent)
as.matrix(var) # coerce a vector (values + attributes) into a matrix
|
# DESyn: test each gene for differential expression between two sample
# groups of count data, using a negative-binomial likelihood-ratio
# statistic calibrated against a relabeling-based null distribution.
#
# data:  data frame whose first column is the gene id and whose remaining
#        columns are per-sample counts (one column per sample).
# group: 0/1 vector, one entry per count column (0 = first group,
#        1 = second group).
#
# Returns a data.frame with columns geneid, pvalue, qvalue.
#
# NOTE(review): relies on fitdist() (fitdistrplus), DGEList() and
# estimateDisp() (edgeR), and q.value(), none of which are defined or
# loaded in this file -- the caller must have them attached.  The origin
# of q.value() is unclear; TODO confirm.
DESyn<-function(data,group){
geneid<-data[,1]
obs<-as.matrix(data[,-1])
# Number of replicates per group and number of genes (rows of obs).
repli1<-sum(group==0)
repli2<-sum(group==1)
cc<-dim(obs)[1]
###################################LRT test######################################################################
# Per-gene LRT: fit one NB to all samples vs. one NB per group;
# -2 * log-likelihood ratio is referred to a chi-square with 2 df.
lrt.pvalue<-c(NA)
for (i in 1:cc){
# All-zero count vectors cannot be fitted; their log-likelihood is set to 0.
if (all(obs[i,]==0)) {loglik0.overal<-0
}else {loglik0.overal<-fitdist(obs[i,],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
if (all(obs[i,group==0]==0)) {loglik0.control<-0
}else {loglik0.control<-fitdist(obs[i,group==0],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
if (all(obs[i,group==1]==0)) {loglik0.case<-0
}else {loglik0.case<-fitdist(obs[i,group==1],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
lrt.stat<--2*(loglik0.overal-loglik0.control-loglik0.case)
lrt.pvalue[i]<-1-pchisq(lrt.stat,df=2)
}
###################################Proposed########################################################################
# Proposed statistic: recompute the LRT for every possible assignment of
# repli1 samples to one group (all choose(n, repli1) relabelings), this
# time fixing the NB size parameter at 1 / (edgeR tagwise dispersion).
pvalue<-c(NA)
proposed.stat<-matrix(c(NA),nrow=cc,ncol=(choose(length(group),repli1)))
for (j in 1:(choose(length(group),repli1))){
# grp: the j-th candidate relabeling -- chosen samples get label 1, rest 2.
grp<-rep(2,length(group))
grp[combn(length(group),repli1)[,j]]<-1
# Tagwise dispersions for pooled data and for each relabeled group
# (library sizes and normalization factors all fixed at 1).
dm0<-DGEList(counts=obs,lib.size=rep(1,length(group)),norm.factors=rep(1,length(group)),group=grp)
disp.overal<-estimateDisp(dm0)$tagwise.dispersion
dm1<-DGEList(counts=obs[,grp==1],lib.size=rep(1,repli1),norm.factors=rep(1,repli1))
disp.control<-estimateDisp(dm1)$tagwise.dispersion
dm2<-DGEList(counts=obs[,grp==2],lib.size=rep(1,repli2),norm.factors=rep(1,repli2))
disp.case<-estimateDisp(dm2)$tagwise.dispersion
for (i in 1:cc){
if (all(obs[i,]==0)) {loglik.overal<-0
}else {loglik.overal<-fitdist(obs[i,],"nbinom",method="mle",fix.arg=list(size=1/disp.overal[i]),control=list(maxit=2000000))$loglik}
if (all(obs[i,grp==1]==0)) {loglik.control<-0
}else {loglik.control<-fitdist(obs[i,grp==1],"nbinom",method="mle",fix.arg=list(size=1/disp.control[i]),control=list(maxit=2000000))$loglik}
if (all(obs[i,grp==2]==0)) {loglik.case<-0
}else {loglik.case<-fitdist(obs[i,grp==2],"nbinom",method="mle",fix.arg=list(size=1/disp.case[i]),control=list(maxit=2000000))$loglik}
proposed.stat[i,j]<--2*(loglik.overal-loglik.control-loglik.case)
}
}
# Null distribution: statistics of genes whose ordinary LRT p-value is
# >= 0.1 (treated as non-DE), pooled across all relabelings.
null.stat<-proposed.stat[lrt.pvalue>=0.1,]
# index: the column of proposed.stat whose relabeling matches the true
# group assignment (label 1 == original group 0).
index<-which(rowSums(t(combn(length(group),repli1)==which(group==0)))==repli1)
for (i in 1:cc){
# Empirical p-value: fraction of null statistics exceeding the observed one.
pvalue[i]<-sum(null.stat>proposed.stat[i,index])/dim(null.stat)[1]/dim(null.stat)[2]
}
qvalue<-q.value(pvalue)
out<-data.frame(geneid=geneid,pvalue=pvalue,qvalue=qvalue)
return(out)
}
|
/DESyn.R
|
no_license
|
cmrf7/DESyn
|
R
| false
| false
| 2,553
|
r
|
# DESyn: test each gene for differential expression between two sample
# groups of count data, using a negative-binomial likelihood-ratio
# statistic calibrated against a relabeling-based null distribution.
#
# data:  data frame whose first column is the gene id and whose remaining
#        columns are per-sample counts (one column per sample).
# group: 0/1 vector, one entry per count column (0 = first group,
#        1 = second group).
#
# Returns a data.frame with columns geneid, pvalue, qvalue.
#
# NOTE(review): relies on fitdist() (fitdistrplus), DGEList() and
# estimateDisp() (edgeR), and q.value(), none of which are defined or
# loaded in this file -- the caller must have them attached.  The origin
# of q.value() is unclear; TODO confirm.
DESyn<-function(data,group){
geneid<-data[,1]
obs<-as.matrix(data[,-1])
# Number of replicates per group and number of genes (rows of obs).
repli1<-sum(group==0)
repli2<-sum(group==1)
cc<-dim(obs)[1]
###################################LRT test######################################################################
# Per-gene LRT: fit one NB to all samples vs. one NB per group;
# -2 * log-likelihood ratio is referred to a chi-square with 2 df.
lrt.pvalue<-c(NA)
for (i in 1:cc){
# All-zero count vectors cannot be fitted; their log-likelihood is set to 0.
if (all(obs[i,]==0)) {loglik0.overal<-0
}else {loglik0.overal<-fitdist(obs[i,],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
if (all(obs[i,group==0]==0)) {loglik0.control<-0
}else {loglik0.control<-fitdist(obs[i,group==0],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
if (all(obs[i,group==1]==0)) {loglik0.case<-0
}else {loglik0.case<-fitdist(obs[i,group==1],"nbinom",method="mle",control=list(maxit=2000000))$loglik}
lrt.stat<--2*(loglik0.overal-loglik0.control-loglik0.case)
lrt.pvalue[i]<-1-pchisq(lrt.stat,df=2)
}
###################################Proposed########################################################################
# Proposed statistic: recompute the LRT for every possible assignment of
# repli1 samples to one group (all choose(n, repli1) relabelings), this
# time fixing the NB size parameter at 1 / (edgeR tagwise dispersion).
pvalue<-c(NA)
proposed.stat<-matrix(c(NA),nrow=cc,ncol=(choose(length(group),repli1)))
for (j in 1:(choose(length(group),repli1))){
# grp: the j-th candidate relabeling -- chosen samples get label 1, rest 2.
grp<-rep(2,length(group))
grp[combn(length(group),repli1)[,j]]<-1
# Tagwise dispersions for pooled data and for each relabeled group
# (library sizes and normalization factors all fixed at 1).
dm0<-DGEList(counts=obs,lib.size=rep(1,length(group)),norm.factors=rep(1,length(group)),group=grp)
disp.overal<-estimateDisp(dm0)$tagwise.dispersion
dm1<-DGEList(counts=obs[,grp==1],lib.size=rep(1,repli1),norm.factors=rep(1,repli1))
disp.control<-estimateDisp(dm1)$tagwise.dispersion
dm2<-DGEList(counts=obs[,grp==2],lib.size=rep(1,repli2),norm.factors=rep(1,repli2))
disp.case<-estimateDisp(dm2)$tagwise.dispersion
for (i in 1:cc){
if (all(obs[i,]==0)) {loglik.overal<-0
}else {loglik.overal<-fitdist(obs[i,],"nbinom",method="mle",fix.arg=list(size=1/disp.overal[i]),control=list(maxit=2000000))$loglik}
if (all(obs[i,grp==1]==0)) {loglik.control<-0
}else {loglik.control<-fitdist(obs[i,grp==1],"nbinom",method="mle",fix.arg=list(size=1/disp.control[i]),control=list(maxit=2000000))$loglik}
if (all(obs[i,grp==2]==0)) {loglik.case<-0
}else {loglik.case<-fitdist(obs[i,grp==2],"nbinom",method="mle",fix.arg=list(size=1/disp.case[i]),control=list(maxit=2000000))$loglik}
proposed.stat[i,j]<--2*(loglik.overal-loglik.control-loglik.case)
}
}
# Null distribution: statistics of genes whose ordinary LRT p-value is
# >= 0.1 (treated as non-DE), pooled across all relabelings.
null.stat<-proposed.stat[lrt.pvalue>=0.1,]
# index: the column of proposed.stat whose relabeling matches the true
# group assignment (label 1 == original group 0).
index<-which(rowSums(t(combn(length(group),repli1)==which(group==0)))==repli1)
for (i in 1:cc){
# Empirical p-value: fraction of null statistics exceeding the observed one.
pvalue[i]<-sum(null.stat>proposed.stat[i,index])/dim(null.stat)[1]/dim(null.stat)[2]
}
qvalue<-q.value(pvalue)
out<-data.frame(geneid=geneid,pvalue=pvalue,qvalue=qvalue)
return(out)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SIBER.R
\docType{package}
\name{SIBER}
\alias{SIBER}
\alias{SIBER-package}
\title{SIBER: A package for fitting Bayesian Ellipses to Stable Isotope Data}
\description{
The SIBER package provides tools for fitting multivariate normal
distributions to bivariate data using Bayesian Inference. These
distributions can then be used to calculate probability
distributions of Standard Ellipse Areas for comparing groups of data,
or to calculate the 6 Layman metrics for comparing entire communities.
}
\section{SIBER functions}{
The SIBER functions ...
}
|
/man/SIBER.Rd
|
no_license
|
andrewcparnell/SIBER
|
R
| false
| false
| 635
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SIBER.R
\docType{package}
\name{SIBER}
\alias{SIBER}
\alias{SIBER-package}
\title{SIBER: A package for fitting Bayesian Ellipses to Stable Isotope Data}
\description{
The SIBER package provides tools for fitting multivariate normal
distributions to bivariate data using Bayesian Inference. These
distributions can then be used to calculate probability
distributions of Standard Ellipse Areas for comparing groups of data,
or to calculate the 6 Layman metrics for comparing entire communities.
}
\section{SIBER functions}{
The SIBER functions ...
}
|
##----------------------------------------------------------------------------------##
##--- SIMPLE LINEAR REGRESSION MODEL building                                 ------##
##----------------------------------------------------------------------------------##
##--- Step 1: Clear environment variables ------------------------------------------##
##__________________________________________________________________________________##
##--- Step 2: Set working Directory ------------------------------------------------##
## NOTE(review): hard-coded, machine-specific path -- adjust before running.
getwd()
setwd("E:/SEM 3/Machine Learning")
##__________________________________________________________________________________##
##--- Step 3: Read the data from the csv file --------------------------------------##
cars_data <- read.csv(file = "Toyota_SimpleReg.csv", header = TRUE)
summary(cars_data)
str(cars_data)
##__________________________________________________________________________________##
##--- Step 4: Perform Exploratory Data Analysis and Data Pre-processing ------------##
## Drop irrelevant attributes: columns 1 and 2 (model and id).
cars_data <- cars_data[, -c(1, 2)]
str(cars_data)
## Covariance and correlation between the attributes:
cov(cars_data)
## A negative covariance between Age_06_15 and Price indicates a negative
## linear relationship; the scatter plot below shows the same pattern.
plot(cars_data$Age_06_15, cars_data$Price,
     xlab = "Age of the car", ylab = "Price in ($)", pch = 18, col = "red")
cor(cars_data)
cor(cars_data$Age_06_15, cars_data$Price)
## The correlation coefficient always lies in [-1, 1]; a value close to -1
## here means Age and Price are strongly negatively correlated, which
## supports building a simple linear regression model.
##__________________________________________________________________________________##
##--- Step 5: Split the data into train and test datasets --------------------------##
## Build three alternative train:test splits -- 70:30, 80:20 and 90:10.
## BUG FIX: the test set must be the complement of the training rows
## (cars_data[-trainRows, ]); the original used the same rows for both.
rows <- seq(1, nrow(cars_data), 1)
set.seed(123)
trainRows <- sample(rows, (70 * nrow(cars_data)) / 100)
cars_train <- cars_data[trainRows, ]
cars_test <- cars_data[-trainRows, ]
trainRows1 <- sample(rows, (80 * nrow(cars_data)) / 100)
cars_train1 <- cars_data[trainRows1, ]
cars_test1 <- cars_data[-trainRows1, ]
trainRows2 <- sample(rows, (90 * nrow(cars_data)) / 100)
cars_train2 <- cars_data[trainRows2, ]
cars_test2 <- cars_data[-trainRows2, ]
##__________________________________________________________________________________##
##--- Step 6: Linear regression model building--------------------------------------##
## Fit Price ~ Age on each training split and show the coefficients.
LinReg <- lm(Price ~ Age_06_15, data = cars_train)
LinReg
coefficients(LinReg)
LinReg1 <- lm(Price ~ Age_06_15, data = cars_train1)
LinReg1   # BUG FIX: previously printed LinReg instead of LinReg1
coefficients(LinReg1)
LinReg2 <- lm(Price ~ Age_06_15, data = cars_train2)
LinReg2   # BUG FIX: previously printed LinReg instead of LinReg2
coefficients(LinReg2)
## Summary of the 70:30 model; inspect the residuals for structure.
summary(LinReg)
plot(LinReg$residuals)
##__________________________________________________________________________________##
##--- Step 7: Check for validity of linear regression assumptions ------------------##
#HINT: plot the 4 graphs to check. Write your comments
##__________________________________________________________________________________##
##--- Step 8: Predict on testdata --------------------------------------------------##
##__________________________________________________________________________________##
##--- Step 9: Error Metrics --------------------------------------------------------##
#Error verification on train data
#Error verification on test data
##__________________________________________________________________________________##
##--- Step 10: Confidence and Prediction Intervals----------------------------------##
#Find the confidence and prediction intervals and plot them for the WHOLE dataset
##__________________________________________________________________________________##
#-----------------------end---------------------------------------------------------##
|
/Simple_linear_Regression.R
|
no_license
|
sumneema/ML
|
R
| false
| false
| 4,393
|
r
|
##----------------------------------------------------------------------------------##
##--- SIMPLE LINEAR REGRESSION MODEL building                                 ------##
##----------------------------------------------------------------------------------##
##--- Step 1: Clear environment variables ------------------------------------------##
##__________________________________________________________________________________##
##--- Step 2: Set working Directory ------------------------------------------------##
## NOTE(review): hard-coded, machine-specific path -- adjust before running.
getwd()
setwd("E:/SEM 3/Machine Learning")
##__________________________________________________________________________________##
##--- Step 3: Read the data from the csv file --------------------------------------##
cars_data <- read.csv(file = "Toyota_SimpleReg.csv", header = TRUE)
summary(cars_data)
str(cars_data)
##__________________________________________________________________________________##
##--- Step 4: Perform Exploratory Data Analysis and Data Pre-processing ------------##
## Drop irrelevant attributes: columns 1 and 2 (model and id).
cars_data <- cars_data[, -c(1, 2)]
str(cars_data)
## Covariance and correlation between the attributes:
cov(cars_data)
## A negative covariance between Age_06_15 and Price indicates a negative
## linear relationship; the scatter plot below shows the same pattern.
plot(cars_data$Age_06_15, cars_data$Price,
     xlab = "Age of the car", ylab = "Price in ($)", pch = 18, col = "red")
cor(cars_data)
cor(cars_data$Age_06_15, cars_data$Price)
## The correlation coefficient always lies in [-1, 1]; a value close to -1
## here means Age and Price are strongly negatively correlated, which
## supports building a simple linear regression model.
##__________________________________________________________________________________##
##--- Step 5: Split the data into train and test datasets --------------------------##
## Build three alternative train:test splits -- 70:30, 80:20 and 90:10.
## BUG FIX: the test set must be the complement of the training rows
## (cars_data[-trainRows, ]); the original used the same rows for both.
rows <- seq(1, nrow(cars_data), 1)
set.seed(123)
trainRows <- sample(rows, (70 * nrow(cars_data)) / 100)
cars_train <- cars_data[trainRows, ]
cars_test <- cars_data[-trainRows, ]
trainRows1 <- sample(rows, (80 * nrow(cars_data)) / 100)
cars_train1 <- cars_data[trainRows1, ]
cars_test1 <- cars_data[-trainRows1, ]
trainRows2 <- sample(rows, (90 * nrow(cars_data)) / 100)
cars_train2 <- cars_data[trainRows2, ]
cars_test2 <- cars_data[-trainRows2, ]
##__________________________________________________________________________________##
##--- Step 6: Linear regression model building--------------------------------------##
## Fit Price ~ Age on each training split and show the coefficients.
LinReg <- lm(Price ~ Age_06_15, data = cars_train)
LinReg
coefficients(LinReg)
LinReg1 <- lm(Price ~ Age_06_15, data = cars_train1)
LinReg1   # BUG FIX: previously printed LinReg instead of LinReg1
coefficients(LinReg1)
LinReg2 <- lm(Price ~ Age_06_15, data = cars_train2)
LinReg2   # BUG FIX: previously printed LinReg instead of LinReg2
coefficients(LinReg2)
## Summary of the 70:30 model; inspect the residuals for structure.
summary(LinReg)
plot(LinReg$residuals)
##__________________________________________________________________________________##
##--- Step 7: Check for validity of linear regression assumptions ------------------##
#HINT: plot the 4 graphs to check. Write your comments
##__________________________________________________________________________________##
##--- Step 8: Predict on testdata --------------------------------------------------##
##__________________________________________________________________________________##
##--- Step 9: Error Metrics --------------------------------------------------------##
#Error verification on train data
#Error verification on test data
##__________________________________________________________________________________##
##--- Step 10: Confidence and Prediction Intervals----------------------------------##
#Find the confidence and prediction intervals and plot them for the WHOLE dataset
##__________________________________________________________________________________##
#-----------------------end---------------------------------------------------------##
|
# Regenerate the stored test fixture for locatr's fmt_* formatting helpers.
library(stringr)
library(rlang)
library(dplyr)
library(purrr)

# All functions exported by locatr whose names start with "fmt_", excluding
# the "single"-variants, in sorted order.
function_list <- lsf.str("package:locatr")
keep <- str_detect(function_list, "^fmt_") & !str_detect(function_list, "single")
fmt_functions <- sort(function_list[keep])

# Example workbook whose cell formats the fmt_* helpers are applied to.
tidyxl_df <- xlsx_cells_fmt(locatr_example("worked-examples.xlsx"),
                            sheets = "pivot-annotations")

# Run one fmt_* helper against the example sheet's formats and coerce the
# result to character so the fixture is type-stable.
run_fmt <- function(fn) {
  as.character(invoke(fn,
                      format_id_vec = tidyxl_df$local_format_id,
                      sheet_formats = attr(tidyxl_df, "formats")))
}

# Indices 14, 15, 19 and 20 are skipped — same exclusions as the original
# fixture (presumably helpers that need different inputs; TODO confirm).
fmt_functions_test <- map(fmt_functions[-c(14, 15, 19, 20)], run_fmt)

usethis::use_data(fmt_functions_test, overwrite = TRUE)
|
/inst/extdata/data-raw/fmt_functions_test.R
|
permissive
|
jimsforks/locatr
|
R
| false
| false
| 582
|
r
|
# Regenerate the stored test fixture for locatr's fmt_* formatting helpers.
library(stringr)
library(rlang)
library(dplyr)
library(purrr)
# All functions exported by locatr, then filtered to the fmt_* helpers,
# excluding the "single"-variants, sorted alphabetically.
function_list <- lsf.str("package:locatr")
fmt_functions <- function_list %>% .[str_detect(.,"^fmt_") & !str_detect(.,"single")] %>% sort()
# Example workbook whose cell formats the fmt_* helpers are applied to.
tidyxl_df <-
locatr_example("worked-examples.xlsx") %>% xlsx_cells_fmt(sheets = "pivot-annotations")
# Apply each retained helper to the example sheet's formats and coerce to
# character. Indices 14, 15, 19 and 20 are excluded — presumably helpers
# that require different inputs; TODO confirm.
fmt_functions_test <-
map(fmt_functions[-c(14,15,19,20)],
~invoke(.x,format_id_vec = tidyxl_df$local_format_id,
sheet_formats = attr(tidyxl_df, "formats")) %>%
as.character)
# Save the fixture into the package's data/ directory.
usethis::use_data(fmt_functions_test, overwrite = TRUE)
|
\encoding{UTF-8}
\name{bipartite-package}
\alias{bipartite-package}
\alias{bipartite}
\docType{package}
\title{
Analysis of bipartite ecological webs
}
\description{
Bipartite provides functions to visualise webs and calculate a series of indices commonly used to describe pattern in (ecological) networks, a.k.a. webs. It focusses on webs consisting of only two levels, e.g. pollinator-visitation or predator-prey webs. Visualisation is important to get an idea of what we are actually looking at, while the indices summarise different aspects of the webs topology.}
\details{
%We only had three types of bipartite webs in mind when writing this package: seed-disperser, plant-pollinator and host-parasitoid systems. In how far it makes sense to use these functionalities for other systems (or indeed for these systems) lies in the hands of the user. Please refer to the literature cited for details on the theory behind the indices.
%
Input for most analyses is an interaction matrix of m species from one group (\dQuote{higher}) with n species from another group (\dQuote{lower}), i.e. a n x m matrix, where higher level species are in columns, lower level species in rows. Column and row names can be provided. This is fundamentally different from \dQuote{one-mode} webs, which are organised as k x k matrix, i.e. one group of species only, in which each species can interact with each other. Such a format is incompatible with the functions we provide here. (Note, however, that functions \code{\link{as.one.mode}} and \code{\link{web2edges}} are convenience functions to morph bipartite networks into one-mode webs. Furthermore, some indices build on one-mode networks and are called from bipartite.)
Before you start with the network, you have to get the data into the right shape. The function \code{\link{frame2webs}} aims to facilitate this process. Arranging a web, e.g. by size, is supported by \code{\link{sortweb}}.
The typical first step is to \bold{visualise} the network. Two functions are on offer here: one (\code{\link{visweb}}) simply plots the matrix in colours depicting the strength of an interaction and options for re-arranging columns and rows (e.g. to identify compartments or nesting). The other function (\code{\link{plotweb}}) plots the actual web with participants (as two rows of rectangles) connected by lines (proportional to interaction strength). Both can be customised by many options.
The second step is to \bold{calculate indices} describing network topography. There are \bold{three} different levels this can be achieved at: the entire web (using function \code{\link{networklevel}}), at the level of each group (also using function \code{\link{networklevel}}) or the individual species (using function \code{\link{specieslevel}}). Most other functions in the package are helpers, although some can be called on their own and return the respective result (\code{\link{dfun}}, \code{\link{H2fun}} and \code{\link{second.extinct}} with \code{\link{slope.bipartite}}).
The third step is to \bold{compare results to null models}. Many interaction matrices are very incomplete snapshots of the true underlying network (e.g. a one-week sampling of a pollination network on a patch of 4 x 4 meters). As a consequence, many species were rarely observed, many are singletons (only one recording). To make analyses comparable across networks with different sampling intensity and number of species per group, we need a common yardstick. We suggest that users should use a null model, i.e. an algorithm that randomises entries while constraining some web properties (such as dimensions, marginal totals or connectance). The function \code{\link{nullmodel}} provides a few such null models, but this is a wide field of research and we make no recommendations (actually, we do: see Dormann et al. 2009 and Dormann 2011, both shipping in the doc-folder of this package). You can also simulate networks using \code{\link{genweb}} or \code{\link{null.distr}}.
Finally, bipartite comes with 23 quantitative pollination network data sets taken from the NCEAS interaction webs data base (use \code{data(package="bipartite")} to show their names) and it has a few miscellaneous functions looking at some special features of bipartite networks (such as modularity: \code{\link{computeModules}} or apparent competition: \code{\link{PAC}}).
\bold{Speed}: The code of bipartite is almost exclusively written in R. You can increase the speed a bit (by 30 to 50 \%, depending on the functions you use) by compiling functions on-the-fly. To do so, you need to load the \pkg{compiler} package and type: \code{enableJIT(3)}. The first time you call a function, it will be compiled to bytecode (just-in-time: jit), which takes a few seconds, but the second call will be substantially faster than without compilation. In the few tests we have run, this improvement was NOT substantial (i.e. a few tens of percent), indicating, I guess, that our R code wasn't too bad. See \pkg{compiler}-help files or \url{http://www.r-statistics.com/2012/04/speed-up-your-r-code-using-a-just-in-time-jit-compiler/} for details.
See help pages for details and examples.
\tabular{ll}{
Package: \tab bipartite\cr
Type: \tab Package\cr
Version: \tab 2.06\cr
Date: \tab 2015-09-25\cr
License: \tab GPL \cr
}
}
\section{versionlog}{
Please see help page \code{\link{versionlog}} for all changes and updates prior to version 2.00. This page will only list most recent changes.
\itemize{
\item 2.07 (release date: XX-YYYY-2015)
\describe{ % begin describe 2.07
\item{Some explanation added to \code{\link{czvalues}},}{ where a z-value of NA is returned if a species is alone (in its trophic level) in a module. This is due to the way z-values are computed, and not a bug.}
} % end describe 2.07
\item 2.06 (release date: 29-SEP-2015)
\describe{ % begin describe 2.06
\item{Bug fix in \code{\link{C.score}},}{ which did not compute the maximum number of possible checkerboards correctly, and hence let the normalised C-score to be incorrect. Now it uses a brute-force approach, which works fine but takes its time.}
\item{Function \code{\link{nestedcontribution}}}{was not exported (i.e. not listed in the namespace file). Fixed. Thanks to Wesley Dátillo for reporting.}
\item{Help page of \code{\link{specieslevel}}}{now correctly described a species' degree as sum of its links. Thanks to Bernhard Hoiß for the correction!}
\item{C++-warnings addressed:}{outcommented some unused variables in dendro.h and removed some fprintf-warnings in bmn5.cc}
\item{Little bug fix in \code{\link{vaznull}}:}{Threw an error when matrix was without 0s. Thanks to Thais Zanata for reporting.}
} % end describe 2.06
\item 2.05 (release date: 24-Nov-2014)
\describe{ % begin describe 2.05
\item{New function \code{\link{nestedcontribution}}}{which computes the contribution of each species to the overall nestedness, based on Bascompte et al. 2003 and as used by Saavedra et al. 2011. Many thanks to Daniel Stouffer for contributing this function!}
\item{New function \code{\link{mgen}}:}{this function is based on R-code written by Diego Vázquez (many thanks for sending the code), with a bit of brushing up options by CFD. The function takes a probability matrix generated by whatever mechanism and builds a null model from it. This is a nifty little idea, making null modelling concerned with generating ideas on what makes an interaction probable and leaving the step of producing an integer network of simulated interactions to this function.}
\item{minor fixes in \code{\link{networklevel}}}{``weighted connectance'' was only returned when ``linkage density'' was in ``index'' call; now also available on its own. Also slightly revised the help file.}
\item{\code{\link{nested}} with option \option{weighted NODF} called the unsorted version of this function,}{while calling the same index in \code{\link{networklevel}} called the sorted. This is not nice (although not strictly wrong). Now both call the sorted version and users have to directly invoke \code{nestednodf} for the unsorted option. Many thanks to Julian Resasco for reporting!}
\item{Changes to the help page of \code{\link{vaznull}}:}{I (CFD) misread the original paper introducing this null model and hence assumed that \code{\link{vaznull}} would constrain marginal totals \bold{and} connectance. However, this was not intended in Diego Vázquez's original implementation and never stated anywhere (except in the help pages of this function here in bipartite). Hence, the help pages were changed to now reflect both intention and actual action of this function. This also means that currently only one null model with constrained marginal totals \bold{and} connectance is available: \code{\link{swap.web}}. Many thanks to Diego for clearing this up!}
\item{Some example code had to be re-written}{to adapt to the upcoming/new structure of \pkg{vegan}, which got rid of function \code{commsimulator} (replaced by \code{simulate}). Many thanks to Jari Oksanen for informing me about this!}
\item{Added an error message}{to function \code{\link{second.extinct}} for the case that a user wants to provide an extinction sequence for both trophic levels. There is no obvious way to simulate this across the two groups, and hence it is not implemented. Also added error messages for non-matching vector/web dimensions and alike.}
} % end describe 2.05
\item 2.04 (release date: 25-Mar-2014)
\describe{ % begin describe 2.04
\item{R-C++-communication bug fixed in \code{\link{computeModules}}:}{This bug has been a constant thorn in my side. Somehow the C-code behind \code{computeModules} could only be called once. On second call, it returned an error because somehow it kept some old files in memory. So far, I used a work-around (unloading and re-loading the dynamic library), which only worked on Windows and Mac. I still don't fully understand it, but thanks to Tobias Hegemann (whom I paid for being more competent than myself) we now have a function running bug-free on all platforms. (Deep sigh of relief.)}
\item{The call of index ``functional complementarity'' through \code{\link{networklevel}} did not work.}{Fixed this legacy issue, which was due to a confusion created by the index' earlier name of ``functional diversity''.}
\item{Help page to \code{\link{specieslevel}} gave incomplete name for one index:}{Should be \option{interaction push pull}; also the function itself had the ``push pull''-bit messed up. Thanks to Natacha Chacoff for reporting!}
\item{Sequence of indices differed between lower and higher level. (Fixed.)}{Both should be the same and should fit the description in the help file. Thanks to Jimmy O'Donnell for reporting!}
} % end describe 2.04
\item 2.03 (release date: 15-Jan-2014)
\describe{ % begin describe 2.03
\item{Some ghost text led to conflicts with the updated package checking.}{Ghost text deleted. Thanks to Brian Ripley of the R-Team and CRAN for not only reporting the issue but also pointing to its solution!}
\item{Option \option{empty.web} added to \code{\link{specieslevel}}:}{Similar to the argument in \code{\link{networklevel}}; non-interacting species from the network were always excluded so far; new option \option{FALSE} not fully tested yet.}
\item{Minor bug fix in \code{\link{specieslevel}}:}{ ``pollination support index'' returned ``PSI''; ``PDI'' now referenced correctly as ``paired differences index''.}
\item{Simplification in \code{\link{grouplevel}} and correspondingly in \code{\link{networklevel}}:}{Previously, \code{index="generality"} or \code{"vulnerability"} was identical to \code{"effective partners"} with option \code{weighted=TRUE}, but different for \code{weighted=FALSE} (to which only \code{"effective partners"} responded). We reduced this to one index called "generality" or "vulnerability" (depending on the focal group), but which will now give the non-weighted mean if option \code{weighted=FALSE}. It can still be called by "effective partners" for backward compatibility.}
\item{Function \code{\link{grouplevel}} used \code{fd} wrongly!}{Instead of returning the value for rows, it returned the functional diversity for columns (and vice versa). We also used the opportunity to rename the index to its correct name: ``functional complementarity'' and the function to \code{\link{fc}}. Help pages for \code{\link{fc}} and \code{\link{grouplevel}} were adapted accordingly. Thanks to Mariano Devoto for pointing out this mistake!}
\item{New index ``weighted connectance'' in function \code{\link{networklevel}}:}{This index is simply computed as linkage density divided by number of species in the network. Note that using \option{empty.web=TRUE} will affect this value (which is intended). Thanks to Becky Morris for suggesting to add this index here.}
\item{Help page for function \code{\link{PDI}} corrected.}{Thanks to Timothy Poisot for reporting some issues in the help page.}
} % end describe 2.03
\item 2.02 (release date: 30-Sep-2013)
\describe{ % begin describe 2.02
\item{Glitch fixed in \code{\link{grouplevel}} (thus also affecting \code{networklevel}).}{Networks with only one species in one of the two levels resulted in errors, rather than simply return NA for C-score and secondary extinction computation. Thanks to whoever it was for reporting (at the INTECOL workshop).}
\item{Minor bug fixes in \code{\link{specieslevel}}:}{Gave error messages for closeness and betweenness if the network had no shortest path. Now returns a warning and NAs instead. Reported: JF.}
\item{Minor bux fix in \code{\link{networklevel}}:}{Failed to work when an index was listed twice in the function call. Reported: JF.}
\item{New function \code{\link{r2dexternal}}:}{This function is a null model algorithm like Patefields (\code{r2dtable}, but it excepts externally measured abundances to compute the null model-expectation. Experimental.}
\item{Memory leak in \code{\link{computeModules}} fixed.}{Because some object was not deleted, memory consumption of this function shot through the roof (with time). Since R has a somewhat weird way of handling memory, I think that also subsequent operations were slower (because the dynamically expanded memory is not being shrunken again, which is a problem if you use the hard drive as RAM). Thanks to Florian Hartig for investing the time to fix it!}
} % end describe 2.02
\item 2.01 (release date: 28-Jun-2013)
This release features smoothing of various glitches that were introduced when we cleaned up the code for version 2.00.
\describe{ % begin describe 2.01
\item{New index for \code{\link{specieslevel}}:}{Computes the nestedness rank (as proposed by Alarcon et al. 2008). Can also be employed directly using the \bold{new function} \code{\link{nestedrank}} with options for weighting for number of interactions per link, normalising the rank and different method to compute the nestedness-arranged matrix.}
\item{Polishing \code{\link{specieslevel}}:}{Now returns an error message if the index is not recognised, instead of an empty list.}
\item{Function \code{\link{plotweb}}}{received an option to plot additional individuals of a species in different ways. For a host-parasitoid network, some hosts are not parasitised. This data vector can now be interpreted in two ways, making the plotting function a bit more flexible.} %Thanks to Jochen Fründ for implementing.
\item{Function \code{\link{degreedistr}}}{can now be invoked for each level separately. Also arguments can be passed to the plotting options.}
\item{New data set \code{\link{junker2013}}:}{a nice and large pollination network. Thanks to Robert Junker for providing this data set!}
\item{Fixed computation of secondary extinction slopes for both levels simultaneously}{for random extinction sequences. This was so far not possible, because the function did not combine extinction sequences of different lengths. This was simply an oversight, reported by Richard Lance. (Thanks!)}
} % end describe 2.01
\item 2.00 (release date: 15-Mar-2013)
A new version number usually indicates substantial changes. In this case, we have re-named and re-grouped some of the output of \code{\link{networklevel}} and \code{\link{specieslevel}} for greater consistency and transparency. Beware! Running the same functions now (2.00 and up) will yield different results to <2.00 (because the same values are now in a different sequence).
We also started carefully renaming indices and re-writing help files. The main reason is that we started this work thinking of pollination networks. Over time, however, other types of ecological networks came into focus, and now also non-ecological networks are on the table. Thus, we started (and shall continue) referring to lower and higher levels, rather than plant and pollinators, hosts and predators or even trophic levels. Thus, in our emerging nomenclature the two levels are referred to as \dQuote{groups} (their members remain \dQuote{species} interacting with their \dQuote{partners} in the other group).
Please read (or at least skim) the help pages before using a function of version 2.00 for the first time.
In function \code{\link{specieslevel}} indices can now be computed for levels separately (or together). Few user-visible changes, but complete re-structuring under the hood. Option \option{species number} was moved to \code{\link{grouplevel}} as \option{number of species}.
In the new function \code{\link{grouplevel}} we collected all indices that can be computed for each of the two groups (i.e. trophic or other levels). Indices can be computed for each group separately or for both simultaneously. All group-level indices are also accessible through \code{\link{networklevel}}!
In the new function \code{\link{linklevel}} we collected all indices that can be computed for each cell of the bipartite matrix. Currently, there are few such indices, however.
In function \code{\link{networklevel}} we dropped the plotting options. Users wanting to plot degree distributions or extinction slopes are encouraged to use the functions \code{\link{degreedistr}} and \code{\link{slope.bipartite}}, respectively.
Furthermore, due to licensing issues, we copy-pasted several functions from the package \bold{tnet}, created and maintained by Tore Opsahl, to bipartite. We have so far called these functions from tnet, but only recently did R start to enforce license compatibility, which caused this step (bipartite being GPL and tnet being CC by-NC 3.0). We are really very grateful to Tore for allowing us to include the following functions: \code{\link{as.tnet}}, \code{\link{betweenness_w}}, \code{\link{closeness_w}}, \code{\link{clustering_tm}}, \code{\link{distance_w}}, \code{\link{symmetrise_w}}, \code{\link{tnet_igraph}}.
Here a more detailed list of changes:
\describe{
\item{\code{\link{networklevel}}}{
\itemize{
\item Function call and output now more consistent in naming and sequence. When higher and lower level indices are given (e.g. extinction slopes, number of shared partners), the first will always be the one referring to the property of the \emph{lower} level. From a pollinator network perspective, the first value in such a pair describes a plant-level index, the second a pollinator-level index.
\item Indices \option{mean interaction diversity} dropped from \code{\link{networklevel}}. We found no reference to this metric and saw little use for it. It is very similar to vulnerability/generality and can easily be computed from the output of \code{\link{specieslevel}} as \code{mean(specieslevel(web, index="diversity"))}.
\item Now also accepts non-integer values as input. The argument \option{H2_integer} will then automatically be set to FALSE. Will return NA for those indices that cannot be computed (e.g. Fisher's alpha). As a knock-on effect, \code{\link{H2fun}} had to be slightly adapted to round to machine precision when searching for H2min. (A somewhat technical detail, but making \code{H2fun} getting caught sometimes.)
} % end itemize
}
\item{New function \code{\link{grouplevel}}}{in which we collected indices that can be computed for each of the two groups (i.e. trophic or other levels). Indices can be computed for each group separately or for both simultaneously. All group-level indices are also accessible through \code{\link{networklevel}}!}
\item{New function \code{\link{linklevel}}}{in which we collect indices that can be computed for each cell of the bipartite matrix.}
% \item{Index \option{ISA} = \option{interaction strength asymmetry} = \option{dependence asymmetry}} dropped from \code{\link{networklevel}}{We found no study that constructively used this metric and saw little use for it. It can easily be computed from the output of \code{\link{specieslevel}} in two lines of code: \code{out <- specieslevel(web, index="dependence"); mean(abs(out[[1]][[1]]-out[[2]][[1]])/pmax(out[[1]][[1]], out[[2]][[1]]), na.rm=TRUE)}. }
\item{New option to \code{\link{PDI}}:}{\option{normalise=FALSE} offers the option of using the index as originally proposed, although we prefer to use TRUE and made this the default. }
\item{Corrected network \code{\link{bezerra2009}}.}{Network was actually the transpose of the correct network and hence wrongly had plant species as columns.}
\item{New function \code{\link{endpoint}}}{computes end-point degrees following Barrat et al. (2004); one of the indices computed at \code{\link{linklevel}}.}
\item{New function \code{\link{frame2webs}}}{helps organising data into one or more webs.}
\item{New function \code{\link{webs2array}}}{helps organising webs into one array.}
\item{Function \code{\link{specieslevel}}}{gained two new indices (thanks to Jochen Fründ): \option{proportional} \option{similarity} and \option{proportional generality}. See help page of that function for details.}
\item{New function \code{\link{npartite}}}{Experimental function to analyse more-than-2-level networks.}
\item{\code{\link{visweb}}}{now obeys the label size to make sure labels are always in the plotting area. Thanks to Zachary Grinspan %(no, he's not a character from Harry Potter, but he does have a sense of humour)
for drawing our attention to this issue.}
\item{Little bug fix in \code{\link{second.extinct}}}{Function failed for argument \option{participant="both"} because I filled the extinction sequence with the wrong number of 0s (to achieve always the same dimensionality of results in repeated runs). Thanks to Carine Emer for reporting!}
\item{\code{\link{specieslevel}}}{failed to work for non-matrix data (i.e. \code{data.frames}). It now coerces \code{data.frames} to \code{matrix} as a first step and hence should work also on \code{data.frame}s. Thanks to Marina Wolowski for drawing our attention to this problem.}
\item{Minor bug fix in \code{\link{dfun}}:}{When external abundances were provided with a 0 in it, \code{dfun} could throw up \code{Inf}-values. Reported by Indrani Singh and fixed by Jochen Fründ.}
\item{Settings for functions called by \code{\link{nested}}}{are now enshrined in stone. The initial reason was to set only the default for one function (\code{\link{nestedness}}) to a faster setting (\option{null.models=FALSE}), but then I decided to restrict all settings to the defaults of the functions called (except for this one option).}
\item{Bug fix for the rarely used function \code{\link{null.t.test}}:}{Did not work if only one index was given.}
} % end of describe 2.00
} % end of versionlog's itemize
} % end of section versionlog
\author{
Carsten F. Dormann, Jochen Fründ and Bernd Gruber, with additional code from many others (referred to in the respective help file), noticeably from Tore Opsahl's tnet package.
Maintainer: Carsten Dormann \email{carsten.dormann@biom.uni-freiburg.de}
}
\references{
Alarcon, R., Waser, N.M. and Ollerton, J. 2008. Year-to-year variation in the topology of a plant-pollinator interaction network. \emph{Oikos} \bold{117}, 1796--1807
Almeida-Neto, M. and Ulrich, W. (2011) A straightforward computational approach for measuring nestedness using quantitative matrices. \emph{Environmental Modelling & Software}, \bold{26}, 173--178
Bascompte, J., Jordano, P. and Olesen, J. M. (2006) Asymmetric coevolutionary networks facilitate biodiversity maintenance. \emph{Science} \bold{312}, 431--433
Bersier, L. F., Banasek-Richter, C. and Cattin, M. F. (2002) Quantitative descriptors of food-web matrices. \emph{Ecology} \bold{83}, 2394--2407
Blüthgen, N., Menzel, F. and Blüthgen, N. (2006) Measuring specialization in species interaction networks. \emph{BMC Ecology} \bold{6}, 12
Blüthgen, N., Menzel, F., Hovestadt, T., Fiala, B. and Blüthgen, N. (2007) Specialization, constraints, and conflicting interests in mutualistic networks. \emph{Current Biology} \bold{17}, 1--6
Corso G., de Araújo A.I.L. and de Almeida A.M. (2008) A new nestedness estimator in community networks. \emph{arXiv}, 0803.0007v1 [physics.bio-ph]
Dalsgaard, B., A. M. Martín González, J. M. Olesen, A. Timmermann, L. H. Andersen, and J. Ollerton. (2008) Pollination networks and functional specialization: a test using Lesser Antillean plant-hummingbird assemblages. \emph{Oikos} \bold{117}, 789--793
Devoto M., Bailey S., Craze P. & Memmott J. (2012) Understanding and planning ecological restoration of plant-pollinator networks.
\emph{Ecology Letters} \bold{15}, 319--328
Dormann, C.F., Fründ, J., Blüthgen, N., and Gruber, B. (2009) Indices, graphs and null models: analysing bipartite ecological networks. \emph{The Open Ecology Journal} \bold{2}, 7--24
Dormann, C.F. (2011) How to be a specialist? Quantifying specialisation in pollination networks. \emph{Network Biology} \bold{1}, 1--20
Galeano J., Pastor J.M. and Iriondo J.M. (2008) Weighted-Interaction Nestedness Estimator (WINE): A new estimator to calculate over frequency matrices. \emph{arXiv} 0808.3397v1 [physics.bio-ph]
Martín Gonzáles, A.M., Dalsgaard, B. and Olesen, J.M. (2009) Centrality measures and the importance of generalist species in pollination networks. \emph{Ecological Complexity}, \bold{7}, 36--43
Memmott, J., Waser, N. M. and Price, M. V. (2004) Tolerance of pollination networks to species extinctions. \emph{Proceedings of the Royal Society B} \bold{271}, 2605--2611
Morris, R. J., Lewis, O. T. and Godfray, H. C. J. (2004) Experimental evidence for apparent competition in a tropical forest food web. \emph{Nature} \bold{428}, 310--313
Morris, R. J., Lewis, O. T. and Godfray, H. C. J. (2005) Apparent competition and insect community structure: towards a spatial perspective. \emph{Annales Zoologica Fennici} \bold{42}, 449--462.
Müller, C. B., Adriaanse, I. C. T., Belshaw, R. and Godfray, H. C. J. (1999) The structure of an aphid-parasitoid community. \emph{Journal of Animal Ecology} \bold{68}, 346--370
Poisot, T., Lepennetier, G., Martinez, E., Ramsayer, J., and Hochberg, M.E. (2011a) Resource availability affects the structure of a natural bacteria-bacteriophage community. \emph{Biology Letters} \bold{7}, 201--204
Poisot, T., Bever, J.D., Nemri, A., Thrall, P.H., and Hochberg, M.E. (2011b) A conceptual framework for the evolution of ecological specialisation. \emph{Ecology Letters} \bold{14}, 841--851
Tylianakis, J. M., Tscharntke, T. and Lewis, O. T. (2007) Habitat modification alters the structure of tropical host-parasitoid food webs. \emph{Nature} \bold{445}, 202--205
Vázquez, D. P. and Aizen, M. A. (2004) Asymmetric specialization: A pervasive feature of plant-pollinator interactions. \emph{Ecology} \bold{85}, 1251--1257
Vázquez, D.P., Chacoff, N.,P. and Cagnolo, L. (2009) Evaluating multiple determinants of the structure of plant-animal mutualistic networks. \emph{Ecology} \bold{90}, 2039--2046.
}
\keyword{ package }
\examples{
\dontrun{
data(Safariland)
plotweb(Safariland)
visweb(Safariland)
networklevel(Safariland)
specieslevel(Safariland)
}
}
|
/bipartite/man/bipartite-package.Rd
|
no_license
|
efcaguab/bipartite
|
R
| false
| false
| 28,074
|
rd
|
\encoding{UTF-8}
\name{bipartite-package}
\alias{bipartite-package}
\alias{bipartite}
\docType{package}
\title{
Analysis of bipartite ecological webs
}
\description{
Bipartite provides functions to visualise webs and calculate a series of indices commonly used to describe pattern in (ecological) networks, a.k.a. webs. It focusses on webs consisting of only two levels, e.g. pollinator-visitation or predator-prey webs. Visualisation is important to get an idea of what we are actually looking at, while the indices summarise different aspects of the webs topology.}
\details{
%We only had three types of bipartite webs in mind when writing this package: seed-disperser, plant-pollinator and host-parasitoid systems. In how far it makes sense to use these functionalities for other systems (or indeed for these systems) lies in the hands of the user. Please refer to the literature cited for details on the theory behind the indices.
%
Input for most analyses is an interaction matrix of m species from one group (\dQuote{higher}) with n species from another group (\dQuote{lower}), i.e. a n x m matrix, where higher level species are in columns, lower level species in rows. Column and row names can be provided. This is fundamentally different from \dQuote{one-mode} webs, which are organised as k x k matrix, i.e. one group of species only, in which each species can interact with each other. Such a format is incompatible with the functions we provide here. (Note, however, that functions \code{\link{as.one.mode}} and \code{\link{web2edges}} are convenience functions to morph bipartite networks into one-mode webs. Furthermore, some indices build on one-mode networks and are called from bipartite.)
Before you start with the network, you have to get the data into the right shape. The function \code{\link{frame2webs}} aims to facilitate this process. Arranging a web, e.g. by size, is supported by \code{\link{sortweb}}.
The typical first step is to \bold{visualise} the network. Two functions are on offer here: one (\code{\link{visweb}}) simply plots the matrix in colours depicting the strength of an interaction and options for re-arranging columns and rows (e.g. to identify compartments or nesting). The other function (\code{\link{plotweb}}) plots the actual web with participants (as two rows of rectangles) connected by lines (proportional to interaction strength). Both can be customised by many options.
The second step is to \bold{calculate indices} describing network topography. There are \bold{three} different levels this can be achieved at: the entire web (using function \code{\link{networklevel}}), at the level of each group (also using function \code{\link{networklevel}}) or the individual species (using function \code{\link{specieslevel}}). Most other functions in the package are helpers, although some can be called on their own and return the respective result (\code{\link{dfun}}, \code{\link{H2fun}} and \code{\link{second.extinct}} with \code{\link{slope.bipartite}}).
The third step is to \bold{compare results to null models}. Many interaction matrices are very incomplete snapshots of the true underlying network (e.g. a one-week sampling of a pollination network on a patch of 4 x 4 meters). As a consequence, many species were rarely observed, many are singletons (only one recording). To make analyses comparable across networks with different sampling intensity and number of species per group, we need a common yardstick. We suggest that users should use a null model, i.e. an algorithm that randomises entries while constraining some web properties (such as dimensions, marginal totals or connectance). The function \code{\link{nullmodel}} provides a few such null models, but this is a wide field of research and we make no recommendations (actually, we do: see Dormann et al. 2009 and Dormann 2011, both shipping in the doc-folder of this package). You can also simulate networks using \code{\link{genweb}} or \code{\link{null.distr}}.
Finally, bipartite comes with 23 quantitative pollination network data sets taken from the NCEAS interaction webs data base (use \code{data(package="bipartite")} to show their names) and it has a few miscellaneous functions looking at some special features of bipartite networks (such as modularity: \code{\link{computeModules}} or apparent competition: \code{\link{PAC}}).
\bold{Speed}: The code of bipartite is almost exclusively written in R. You can increase the speed a bit (by 30 to 50 \%, depending on the functions you use) by compiling functions on-the-fly. To do so, you need to load the \pkg{compiler} package and type: \code{enableJIT(3)}. The first time you call a function, it will be compiled to bytecode (just-in-time: jit), which takes a few seconds, but the second call will be substantially faster than without compilation. In the few tests we have run, this improvement was NOT substantial (i.e. a few tens of percent), indicating, I guess, that our R code wasn't too bad. See \pkg{compiler}-help files or \url{http://www.r-statistics.com/2012/04/speed-up-your-r-code-using-a-just-in-time-jit-compiler/} for details.
See help pages for details and examples.
\tabular{ll}{
Package: \tab bipartite\cr
Type: \tab Package\cr
Version: \tab 2.06\cr
Date: \tab 2015-09-25\cr
License: \tab GPL \cr
}
}
\section{versionlog}{
Please see help page \code{\link{versionlog}} for all changes and updates prior to version 2.00. This page will only list most recent changes.
\itemize{
\item 2.07 (release date: XX-YYYY-2015)
\describe{ % begin describe 2.07
\item{Some explanation added to \code{\link{czvalues}},}{ where a z-value of NA is returned if a species is alone (in its trophic level) in a module. This is due to the way z-values are computed, and not a bug.}
} % end describe 2.07
\item 2.06 (release date: 29-SEP-2015)
\describe{ % begin describe 2.06
\item{Bug fix in \code{\link{C.score}},}{ which did not compute the maximum number of possible checkerboards correctly, and hence let the normalised C-score to be incorrect. Now it uses a brute-force approach, which works fine but takes its time.}
\item{Function \code{\link{nestedcontribution}}}{was not exported (i.e. not listed in the namespace file). Fixed. Thanks to Wesley Dátillo for reporting.}
\item{Help page of \code{\link{specieslevel}}}{now correctly described a species' degree as sum of its links. Thanks to Bernhard Hoiß for the correction!}
\item{C++-warnings addressed:}{outcommented some unused variables in dendro.h and removed some fprintf-warnings in bmn5.cc}
\item{Little bug fix in \code{\link{vaznull}}:}{Threw an error when matrix was without 0s. Thanks to Thais Zanata for reporting.}
} % end describe 2.06
\item 2.05 (release date: 24-Nov-2014)
\describe{ % begin describe 2.05
\item{New function \code{\link{nestedcontribution}}}{which computes the contribution of each species to the overall nestedness, based on Bascompte et al. 2003 and as used by Saavedra et al. 2011. Many thanks to Daniel Stouffer for contributing this function!}
\item{New function \code{\link{mgen}}:}{this function is based on R-code written by Diego Vázquez (many thanks for sending the code), with a bit of brushing up options by CFD. The function takes a probability matrix generated by whatever mechanism and builds a null model from it. This is a niffty little idea, making null modelling concerned with generating ideas on what makes an interaction probable and leaving the step of producing and integer-network of simulated interactions to this function.}
\item{minor fixes in \code{\link{networklevel}}}{``weighted connectance'' was only returned when ``linkage density'' was in ``index'' call; now also available on its own. Also sligthly revised the help file.}
\item{\code{\link{nested}} with option \option{weighted NODF} called the unsorted version of this function,}{while calling the same index in \code{\link{networklevel}} called the sorted. This is not nice (although not strictly wrong). Now both call the sorted version and users have to directly invoke \code{nestednodf} for the unsorted option. Many thanks to Julian Resasco for reporting!}
\item{Changes to the help page of \code{\link{vaznull}}:}{I (CFD) misread the original paper introducing this null model and hence assumed that\code{\link{vaznull}} would constrain marginal totals \bold{and} connectance. However, this was not intended in Diego Vázquez original implementation and never stated anywhere (except in the help pages of this function here in bipartite). Hence, the help pages were changed to now reflect both intention and actual action of this function. This also means that currently only one null model with constrained marginal totals \bold{and} connectance is available: \code{\link{swap.web}}. Many thanks to Diego for clearing this up!}
\item{Some example code had to be re-written}{to adapt to the upcoming/new structure of \pkg{vegan}, which got rid of function \code{commsimulator} (replaced by \code{simulate}). Many thanks to Jari Oksanen for informing me about this!}
\item{Added an error message}{to function \code{\link{second.extinct}} for the case that a user wants to provide an extinction sequence for both trophic levels. There is no obvious way to simulate this across the two groups, and hence it is not implemented. Also added error messages for non-matching vector/web dimensions and alike.}
} % end describe 2.05
\item 2.04 (release date: 25-Mar-2014)
\describe{ % begin describe 2.04
\item{R-C++-communication bug fixed in \code{\link{computeModules}}:}{This bug has been a constant thorn in my side. Somehow the C-code behind \code{computeModules} could only be called once. On second call, it returned an error because somehow it kept some old files in memory. So far, I used a work-around (unloading and re-loading the dynamic library), which only worked on Windows and Mac. I still don't fully understand it, but thanks to Tobias Hegemann (whom I paid for being more competent than myself) we now have a function running bug-free on all platforms. (Deep sigh of relief.)}
\item{The call of index ``functional complementarity'' through \code{\link{networklevel}} did not work.}{Fixed this legacy issue, which was due to a confusion created by the index' earlier name of ``functional diversity''.}
\item{Help page to \code{\link{specieslevel}} gave incomplete name for one index:}{Should be \option{interaction push pull}; also the function itself had the ``push pull''-bit messed up. Thanks to Natacha Chacoff for reporting!}
\item{Sequence of indices differed between lower and higher level. (Fixed.)}{Both should be the same and should fit the description in the help file. Thanks to Jimmy O'Donnell for reporting!}
} % end describe 2.04
\item 2.03 (release date: 15-Jan-2014)
\describe{ % begin describe 2.03
\item{Some ghost text led to conflicts with the updated package checking.}{Ghost text deleted. Thanks to Brian Ripley of the R-Team and CRAN for not only reporting the issue but also pointing to its solution!}
\item{Option \option{empty.web} added to \code{\link{specieslevel}}:}{Similar to the argument in \code{\link{networklevel}}; non-interacting species from the network were always excluded so far; new option \option{FALSE} not fully tested yet.}
\item{Minor bug fix in \code{\link{specieslevel}}:}{ ``pollination support index'' returned ``PSI''; ``PDI'' now referenced correctly as ``paired differences index''.}
\item{Simplification in \code{\link{grouplevel}} and correspondingly in \code{\link{networklevel}}:}{Previously, \code{index="generality"} or \code{"vulnerability"} was identical to \code{"effective partners"} with option \code{weighted=TRUE}, but different for \code{weighted=FALSE} (to which only \code{"effective partners"} responded). We reduced this to one index called "generality" or "vulnerability" (depending on the focal group), but which will now give the non-weighted mean if option \code{weighted=FALSE}. It can still be called by "effective partners" for backward compatibility.}
\item{Function \code{\link{grouplevel}} used \code{fd} wrongly!}{Instead of returning the value for rows, it returned the functional diversity for columns (and vice versa). We also used the opportunity to rename the index to its correct name: ``functional complementarity'' and the function to \code{\link{fc}}. Help pages for \code{\link{fc}} and \code{\link{grouplevel}} were adapted accordingly. Thanks to Mariano Devoto for pointing out this mistake!}
\item{New index ``weighted connectance'' in function \code{\link{networklevel}}:}{This index is simply computed as linkage density divided by number of species in the network. Note that using \option{empty.web=TRUE} will affect this value (which is intended). Thanks to Becky Morris for suggesting to add this index here.}
\item{Help page for function \code{\link{PDI}} corrected.}{Thanks to Timothy Poisot for reporting some issues in the help page.}
} % end describe 2.03
\item 2.02 (release date: 30-Sep-2013)
\describe{ % begin describe 2.02
\item{Glitch fixed in \code{\link{grouplevel}} (thus also affecting \code{networklevel}).}{Networks with only one species in one of the two levels resulted in errors, rather than simply return NA for C-score and secondary extinction computation. Thanks to whoever it was for reporting (at the INTECOL workshop).}
\item{Minor bug fixes in \code{\link{specieslevel}}:}{Gave error messages for closeness and betweenness if the network had no shortest path. Now returns a warning and NAs instead. Reported: JF.}
\item{Minor bux fix in \code{\link{networklevel}}:}{Failed to work when an index was listed twice in the function call. Reported: JF.}
\item{New function \code{\link{r2dexternal}}:}{This function is a null model algorithm like Patefields (\code{r2dtable}, but it excepts externally measured abundances to compute the null model-expectation. Experimental.}
\item{Memory leak in \code{\link{computeModules}} fixed.}{Because some object was not deleted, memory consumption of this function shot through the roof (with time). Since R has a somewhat weird way of handling memory, I think that also subsequent operations were slower (because the dynamically expanded memory is not being shrunken again, which is a problem if you use the hard drive as RAM). Thanks to Florian Hartig for investing the time to fix it!}
} % end describe 2.02
\item 2.01 (release date: 28-Jun-2013)
This release features smoothing of various glitches that were introduced when we cleaned up the code for version 2.00.
\describe{ % begin describe 2.01
\item{New index for \code{\link{specieslevel}}:}{Computes the nestedness rank (as proposed by Alarcon et al. 2008). Can also be employed directly using the \bold{new function} \code{\link{nestedrank}} with options for weighting for number of interactions per link, normalising the rank and different method to compute the nestedness-arranged matrix.}
\item{Polishing \code{\link{specieslevel}}:}{Now returns an error message if the index is not recognised, instead of an empty list.}
\item{Function \code{\link{plotweb}}}{received an option to plot additional individuals of a species in different ways. For a host-parasitoid network, some hosts are not parasitised. This data vector can now be interpreted in two ways, making the plotting function a bit more flexible.} %Thanks to Jochen Fründ for implementing.
\item{Function \code{\link{degreedistr}}}{can now be invoked for each level separately. Also arguments can be passed to the plotting options.}
\item{New data set \code{\link{junker2013}}:}{a nice and large pollination network. Thanks to Robert Junker for providing this data set!}
\item{Fixed computation of secondary extinction slopes for both levels simultaneously}{for random extinction sequences. This was so far not possible, because the function did not combine extinction sequences of different lengths. This was simply an oversight, reported by Richard Lance. (Thanks!)}
} % end describe 2.01
\item 2.00 (release date: 15-Mar-2013)
A new version number usually indicates substantial changes. In this case, we have re-named and re-grouped some of the output of \code{\link{networklevel}} and \code{\link{specieslevel}} for greater consistency and transparency. Beware! Running the same functions now (2.00 and up) will yield different results to <2.00 (because the same values are now in a different sequence).
We also started carefully renaming indices and re-writing help files. The main reason is that we started this work thinking of pollination networks. Over time, however, other types of ecological networks came into focus, and now also non-ecological networks are on the table. Thus, we started (and shall continue) referring to lower and higher levels, rather than plant and pollinators, hosts and predators or even trophic levels. Thus, in our emerging nomenclature the two levels are referred to as \dQuote{groups} (their members remain \dQuote{species} interacting with their \dQuote{partners} in the other group).
Please read (or at least skim) the help pages before using a function of version 2.00 for the first time.
In function \code{\link{specieslevel}} indices can now be computed for levels separately (or together). Few user-visible changes, but complete re-structuring under the hood. Option \option{species number} was moved to \code{\link{grouplevel}} as \option{number of species}.
In the new function \code{\link{grouplevel}} we collected all indices that can be computed for each of the two groups (i.e. trophic or other levels). Indices can be computed for each group separately or for both simultaneously. All group-level indices are also accessible through \code{\link{networklevel}}!
In the new function \code{\link{linklevel}} we collected all indices that can be computed for each cell of the bipartite matrix. Currently, there are few such indices, however.
In function \code{\link{networklevel}} we dropped the plotting options. Users wanting to plot degree distributions or extinction slopes are encouraged to use the functions \code{\link{degreedistr}} and \code{\link{slope.bipartite}}, respectively.
Furthermore, due to licensing issues, we copy-pasted several functions from the package \bold{tnet}, created and maintained by Tore Opsahl, to bipartite. We have so far called these functions from tnet, but only recently did R start to enforce license compatibility, which caused this step (bipartite being GPL and tnet being CC by-NC 3.0). We are really very grateful to Tore for allowing us to include the following functions: \code{\link{as.tnet}}, \code{\link{betweenness_w}}, \code{\link{closeness_w}}, \code{\link{clustering_tm}}, \code{\link{distance_w}}, \code{\link{symmetrise_w}}, \code{\link{tnet_igraph}}.
Here a more detailed list of changes:
\describe{
\item{\code{\link{networklevel}}}{
\itemize{
\item Function call and output now more consistent in naming and sequence. When higher and lower level indices are given (e.g. extinction slopes, number of shared partners), the first will always be the one referring to the property of the \emph{lower} level. From a pollinator network perspective, the first value in such a pair describes a plant-level index, the second a pollinator-level index.
\item Indices \option{mean interaction diversity} dropped from \code{\link{networklevel}}. We found no reference to this metric and saw little use for it. It is very similar to vulnerability/generality and can easily be computed from the output of \code{\link{specieslevel}} as \code{mean(specieslevel(web, index="diversity"))}.
\item Now also accepts non-integer values as input. The argument \option{H2_integer} will then automatically be set to FALSE. Will return NA for those indices that cannot be computed (e.g. Fisher's alpha). As a knock-on effect, \code{\link{H2fun}} had to be slightly adapted to round to machine precision when searching for H2min. (A somewhat technical detail, but making \code{H2fun} getting caught sometimes.)
} % end itemize
}
\item{New function \code{\link{grouplevel}}}{in which we collected indices that can be computed for each of the two groups (i.e. trophic or other levels). Indices can be computed for each group separately or for both simultaneously. All group-level indices are also accessible through \code{\link{networklevel}}!}
\item{New function \code{\link{linklevel}}}{in which we collect indices that can be computed for each cell of the bipartite matrix.}
% \item{Index \option{ISA} = \option{interaction strength asymmetry} = \option{dependence asymmetry}} dropped from \code{\link{networklevel}}{We found no study that constructively used this metric and saw little use for it. It can easily be computed from the output of \code{\link{specieslevel}} in two lines of code: \code{out <- specieslevel(web, index="dependence"); mean(abs(out[[1]][[1]]-out[[2]][[1]])/pmax(out[[1]][[1]], out[[2]][[1]]), na.rm=TRUE)}. }
\item{New option to \code{\link{PDI}}:}{\option{normalise=FALSE} offers the option of using the index as originally proposed, although we prefer to use TRUE and made this the default. }
\item{Corrected network \code{\link{bezerra2009}}.}{Network was actually the transpose of the correct network and hence wrongly had plant species as columns.}
\item{New function \code{\link{endpoint}}}{computes end-point degrees following Barrat et al. (2004); one of the indices computed at \code{\link{linklevel}}.}
\item{New function \code{\link{frame2webs}}}{helps organising data into one or more webs.}
\item{New function \code{\link{webs2array}}}{helps organising webs into one array.}
\item{Function \code{\link{specieslevel}}}{gained two new indices (thanks to Jochen Fründ): \option{proportional} \option{similarity} and \option{proportional generality}. See help page of that function for details.}
\item{New function \code{\link{npartite}}}{Experimental function to analyse more-than-2-level networks.}
\item{\code{\link{visweb}}}{now obeys the label size to make sure labels are always in the plotting area. Thanks to Zachary Grinspan %(no, he's not a character from Harry Potter, but he does have a sense of humour)
for drawing our attention to this issue.}
\item{Little bug fix in \code{\link{second.extinct}}}{Function failed for argument \option{participant="both"} because I filled the extinction sequence with the wrong number of 0s (to achieve always the same dimensionality of results in repeated runs). Thanks to Carine Emer for reporting!}
\item{\code{\link{specieslevel}}}{failed to work for non-matrix data (i.e. \code{data.frames}). It now coerces \code{data.frames} to \code{matrix} as a first step and hence should work also on \code{data.frame}s. Thanks to Marina Wolowski for drawing our attention to this problem.}
\item{Minor bug fix in \code{\link{dfun}}:}{When external abundances were provided with a 0 in it, \code{dfun} could throw up \code{Inf}-values. Reported by Indrani Singh and fixed by Jochen Fründ.}
\item{Settings for functions called by \code{\link{nested}}}{are now enshrined in stone. The initial reason was to set only the default for one function (\code{\link{nestedness}}) to a faster setting (\option{null.models=FALSE}), but then I decided to restrict all settings to the defaults of the functions called (except for this one option).}
\item{Bug fix for the rarely used function \code{\link{null.t.test}}:}{Did not work if only one index was given.}
} % end of describe 2.00
} % end of versionlog's itemize
} % end of section versionlog
\author{
Carsten F. Dormann, Jochen Fründ and Bernd Gruber, with additional code from many others (referred to in the respective help file), noticeably from Tore Opsahl's tnet package.
Maintainer: Carsten Dormann \email{carsten.dormann@biom.uni-freiburg.de}
}
\references{
Alarcon, R., Waser, N.M. and Ollerton, J. 2008. Year-to-year variation in the topology of a plant-pollinator interaction network. \emph{Oikos} \bold{117}, 1796--1807
Almeida-Neto, M. and Ulrich, W. (2011) A straightforward computational approach for measuring nestedness using quantitative matrices. \emph{Environmental Modelling & Software}, \bold{26}, 173--178
Bascompte, J., Jordano, P. and Olesen, J. M. (2006) Asymmetric coevolutionary networks facilitate biodiversity maintenance. \emph{Science} \bold{312}, 431--433
Bersier, L. F., Banasek-Richter, C. and Cattin, M. F. (2002) Quantitative descriptors of food-web matrices. \emph{Ecology} \bold{83}, 2394--2407
Blüthgen, N., Menzel, F. and Blüthgen, N. (2006) Measuring specialization in species interaction networks. \emph{BMC Ecology} \bold{6}, 12
Blüthgen, N., Menzel, F., Hovestadt, T., Fiala, B. and Blüthgen, N. (2007) Specialization, constraints, and conflicting interests in mutualistic networks. \emph{Current Biology} \bold{17}, 1--6
Corso G., de Araújo A.I.L. and de Almeida A.M. (2008) A new nestedness estimator in community networks. \emph{arXiv}, 0803.0007v1 [physics.bio-ph]
Dalsgaard, B., A. M. Martín González, J. M. Olesen, A. Timmermann, L. H. Andersen, and J. Ollerton. (2008) Pollination networks and functional specialization: a test using Lesser Antillean plant-hummingbird assemblages. \emph{Oikos} \bold{117}, 789--793
Devoto M., Bailey S., Craze P. & Memmott J. (2012) Understanding and planning ecological restoration of plant-pollinator networks.
\emph{Ecology Letters} \bold{15}, 319--328
Dormann, C.F., Fründ, J., Blüthgen, N., and Gruber, B. (2009) Indices, graphs and null models: analysing bipartite ecological networks. \emph{The Open Ecology Journal} \bold{2}, 7--24
Dormann, C.F. (2011) How to be a specialist? Quantifying specialisation in pollination networks. \emph{Network Biology} \bold{1}, 1--20
Galeano J., Pastor J.M. and Iriondo J.M. (2008) Weighted-Interaction Nestedness Estimator (WINE): A new estimator to calculate over frequency matrices. \emph{arXiv} 0808.3397v1 [physics.bio-ph]
Martín Gonzáles, A.M., Dalsgaard, B. and Olesen, J.M. (2009) Centrality measures and the importance of generalist species in pollination networks. \emph{Ecological Complexity}, \bold{7}, 36--43
Memmott, J., Waser, N. M. and Price, M. V. (2004) Tolerance of pollination networks to species extinctions. \emph{Proceedings of the Royal Society B} \bold{271}, 2605--2611
Morris, R. J., Lewis, O. T. and Godfray, H. C. J. (2004) Experimental evidence for apparent competition in a tropical forest food web. \emph{Nature} \bold{428}, 310--313
Morris, R. J., Lewis, O. T. and Godfray, H. C. J. (2005) Apparent competition and insect community structure: towards a spatial perspective. \emph{Annales Zoologica Fennici} \bold{42}, 449--462.
Müller, C. B., Adriaanse, I. C. T., Belshaw, R. and Godfray, H. C. J. (1999) The structure of an aphid-parasitoid community. \emph{Journal of Animal Ecology} \bold{68}, 346--370
Poisot, T., Lepennetier, G., Martinez, E., Ramsayer, J., and Hochberg, M.E. (2011a) Resource availability affects the structure of a natural bacteria-bacteriophage community. \emph{Biology Letters} \bold{7}, 201--204
Poisot, T., Bever, J.D., Nemri, A., Thrall, P.H., and Hochberg, M.E. (2011b) A conceptual framework for the evolution of ecological specialisation. \emph{Ecology Letters} \bold{14}, 841--851
Tylianakis, J. M., Tscharntke, T. and Lewis, O. T. (2007) Habitat modification alters the structure of tropical host-parasitoid food webs. \emph{Nature} \bold{445}, 202--205
Vázquez, D. P. and Aizen, M. A. (2004) Asymmetric specialization: A pervasive feature of plant-pollinator interactions. \emph{Ecology} \bold{85}, 1251--1257
Vázquez, D.P., Chacoff, N.,P. and Cagnolo, L. (2009) Evaluating multiple determinants of the structure of plant-animal mutualistic networks. \emph{Ecology} \bold{90}, 2039--2046.
}
\keyword{ package }
\examples{
\dontrun{
data(Safariland)
plotweb(Safariland)
visweb(Safariland)
networklevel(Safariland)
specieslevel(Safariland)
}
}
|
/01-Primer Semestre/Inferencia Estadística/Tareas/Tarea 2/Ejercicios en R/TAREA2 EJERCICIO5 Hairo Belmonte.R
|
no_license
|
nicoletron770/Maestria-Computo-Estadistico
|
R
| false
| false
| 4,102
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_enrichment.R
\name{createSpatialEnrich}
\alias{createSpatialEnrich}
\title{createSpatialEnrich}
\usage{
createSpatialEnrich(
gobject,
enrich_method = c("PAGE", "rank", "hypergeometric"),
sign_matrix,
expression_values = c("normalized", "scaled", "custom"),
reverse_log_scale = TRUE,
logbase = 2,
p_value = TRUE,
n_genes = 100,
n_times = 1000,
top_percentage = 5,
output_enrichment = c("original", "zscore"),
name = "PAGE",
return_gobject = TRUE
)
}
\arguments{
\item{gobject}{Giotto object}
\item{enrich_method}{method for gene signature enrichment calculation}
\item{sign_matrix}{Matrix of signature genes for each cell type / process}
\item{expression_values}{expression values to use}
\item{reverse_log_scale}{reverse expression values from log scale}
\item{logbase}{log base to use if reverse_log_scale = TRUE}
\item{p_value}{calculate p-value (default = TRUE)}
\item{n_times}{(page/rank) number of permutation iterations to calculate p-value}
\item{top_percentage}{(hyper) percentage of cells that will be considered to have gene expression with matrix binarization}
\item{output_enrichment}{how to return enrichment output}
\item{name}{to give to spatial enrichment results, default = PAGE}
\item{return_gobject}{return giotto object}
}
\value{
Giotto object or enrichment results if return_gobject = FALSE
}
\description{
Function to calculate gene signature enrichment scores per spatial position using the PAGE, rank, or hypergeometric method.
}
\details{
For details see the individual functions:
\itemize{
\item{PAGE: }{\code{\link{PAGEEnrich}}}
\item{rank: }{\code{\link{rankEnrich}}}
\item{hypergeometric: }{\code{\link{hyperGeometricEnrich}}}
}
}
\examples{
createSpatialEnrich(gobject)
}
|
/doc/createSpatialEnrich.Rd
|
no_license
|
bernard2012/spatialgiotto_web
|
R
| false
| true
| 1,808
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_enrichment.R
\name{createSpatialEnrich}
\alias{createSpatialEnrich}
\title{createSpatialEnrich}
\usage{
createSpatialEnrich(
gobject,
enrich_method = c("PAGE", "rank", "hypergeometric"),
sign_matrix,
expression_values = c("normalized", "scaled", "custom"),
reverse_log_scale = TRUE,
logbase = 2,
p_value = TRUE,
n_genes = 100,
n_times = 1000,
top_percentage = 5,
output_enrichment = c("original", "zscore"),
name = "PAGE",
return_gobject = TRUE
)
}
\arguments{
\item{gobject}{Giotto object}
\item{enrich_method}{method for gene signature enrichment calculation}
\item{sign_matrix}{Matrix of signature genes for each cell type / process}
\item{expression_values}{expression values to use}
\item{reverse_log_scale}{reverse expression values from log scale}
\item{logbase}{log base to use if reverse_log_scale = TRUE}
\item{p_value}{calculate p-value (default = TRUE)}
\item{n_times}{(page/rank) number of permutation iterations to calculate p-value}
\item{top_percentage}{(hyper) percentage of cells that will be considered to have gene expression with matrix binarization}
\item{output_enrichment}{how to return enrichment output}
\item{name}{to give to spatial enrichment results, default = PAGE}
\item{return_gobject}{return giotto object}
}
\value{
Giotto object or enrichment results if return_gobject = FALSE
}
\description{
Function to calculate gene signature enrichment scores per spatial position using the PAGE, rank, or hypergeometric method.
}
\details{
For details see the individual functions:
\itemize{
\item{PAGE: }{\code{\link{PAGEEnrich}}}
\item{rank: }{\code{\link{rankEnrich}}}
\item{hypergeometric: }{\code{\link{hyperGeometricEnrich}}}
}
}
\examples{
createSpatialEnrich(gobject)
}
|
# Load required packages: Excel I/O, string helpers, and web scraping.
library(readxl)
library(writexl)
library(stringr)
library(httr)
library(rvest)
library(xml2)

# Remember to set the work directory to read/write the desired Excel file
# if not using this cloud service.
# Locate the uploaded workbook: keep the last file in /cloud/project whose
# name contains "xls" (this also matches "xlsx"), as the original loop did.
filename <- NULL
for (f in list.files(path = "/cloud/project")) {
  if (str_detect(f, "xls")) {
    filename <- f
  }
}
if (is.null(filename)) {
  stop("No xls file detected")  # message typo fixed (was "detcted")
}

# Read and store the Excel spreadsheet.
SC <- read_excel(filename)
# Check whether a column named `col_name` exists in the data frame `ds`.
#
# Returns TRUE when the column is present; otherwise returns an invisible
# NULL, which is why callers test the result with is.null().
# Bug fix: the original looped over names(SC) -- the *global* data frame --
# so the `ds` argument was silently ignored. It now inspects `ds` itself.
PA_exist <- function(ds, col_name) {
  if (col_name %in% names(ds)) {
    return(TRUE)
  }
  invisible(NULL)
}
# Abort early when either required column is missing from the workbook.
if (is.null(PA_exist(SC, "Property_Address"))) {
  stop("No 'Property_Address' column detected")
}
if (is.null(PA_exist(SC, "Property_Type"))) {
  stop("No 'Property_Type' column detected")
}

# Drop rows with an empty "Property_Address", then split the data into
# rows whose "Property_Type" is already filled in (dfF) and rows whose
# type still has to be looked up (dfNA).
SC <- subset(SC, !is.na(Property_Address))
dfF <- subset(SC, !is.na(Property_Type))
dfNA <- subset(SC, is.na(Property_Type))
# Check whether any element of the character vector `Ads` contains the
# literal text "Single Family".
#
# Returns TRUE on a match; otherwise returns an invisible NULL, which is
# why callers test the result with is.null().
# Fixes: the original scanned element-by-element and carried an
# unreachable `break` after `return(TRUE)`; the scan is now a single
# vectorised base-R call (fixed = TRUE, since the pattern is a literal).
S_Sing <- function(Ads) {
  if (any(grepl("Single Family", Ads, fixed = TRUE))) {
    return(TRUE)
  }
  invisible(NULL)
}
# Look up each address on propertyshark.com and record its property type.
#
# Texts: character vector of street addresses (called with
#   dfNA$Property_Address below).
#
# Side effects: writes one label per address into the *global* dfNA via
# `<<-` (dfNA$Property_Type[x]); nothing useful is returned. Requires
# network access. NOTE(review): the name shadows base::search().
search <- function(Texts){
x = 0
for(j in Texts){
# x is the row of dfNA that corresponds to the current address j.
x = x + 1
# Start a scraping session on the PropertyShark search page.
html_doc = html_session("https://www.propertyshark.com/mason/")
# Fill in and submit the first form on the page: the address plus a
# fixed location of 'Santa Clara County, CA'. `nextpage` is the page
# returned for the submitted form.
nextpage = submit_form(html_doc, set_values(html_form(html_doc)[[1]], search_token=j ,location='Santa Clara County, CA'))
# Branch on the <title> of the result page.
# "Lookup | PropertyShark": the site did not recognise the input.
if(nextpage %>% html_nodes(xpath = '//title') %>% html_text() == "Lookup | PropertyShark"){
dfNA$Property_Type[x] <<- "_invalid input"
# "UI | PropertyShark": several candidate listings were returned.
}else if(nextpage %>% html_nodes(xpath = '//title') %>% html_text() == "UI | PropertyShark"){
# Scan the listing descriptions for the text "Single Family".
# NOTE(review): S_Sing() receives the node set itself here, not
# html_text() of it -- presumably str_detect coerces the nodes to
# character; confirm this works as intended.
if(is.null(S_Sing(nextpage %>% html_nodes(xpath = '//div[@class="description"]'))) == TRUE){
dfNA$Property_Type[x] <<- "_no single family"
}else {
dfNA$Property_Type[x] <<- "Single Family"
}
# Otherwise we may have landed directly on a single property page.
# NOTE(review): the `|` in this str_detect() pattern is regex
# alternation, so any title containing " PropertyShark" also matches.
}else if(str_detect(nextpage %>% html_nodes(xpath = '//title') %>% html_text(),"Property Information | PropertyShark")){
# If "Single Family" appears anywhere in the details block, use it
# directly (S_Sing() returns non-NULL on a match).
if(is.null(S_Sing(nextpage %>% html_nodes(xpath = '//div[@class="cols22"]') %>% html_text())) == FALSE){
dfNA$Property_Type[x] <<- "Single Family"
# Otherwise parse the type out of the first details block: keep the
# text after " class\n" and before the first "(", then trim it.
}else {
dfNA$Property_Type[x] <<- str_trim(str_split(str_split((nextpage %>% html_nodes(xpath = '//div[@class="cols22"]') %>% html_text())[1], "\\(", simplify = T)[,1], " class\n", simplify = T)[,2])
}
# Any other page layout is recorded with an error marker.
}else {
dfNA$Property_Type[x] <<- "_error"
}
}
}
# Run the scraper: fills dfNA$Property_Type in place (via `<<-` inside
# search()); one network round-trip per address.
search(dfNA$Property_Address)
# Recombine the already-typed rows with the freshly looked-up rows.
# Note: row order differs from the input (dfF rows first, then dfNA).
SCNew <- rbind(dfF,dfNA)
# Write the combined table to a new workbook in the working directory.
write_xlsx(SCNew, "dataoutput.xlsx")
|
/Santa Clara County Project/Santa Clara Upwork .R
|
no_license
|
gbajsingh/Upwork-jobs
|
R
| false
| false
| 3,480
|
r
|
# Load required packages: Excel I/O, string helpers, and web scraping.
library(readxl)
library(writexl)
library(stringr)
library(httr)
library(rvest)
library(xml2)

# Remember to set the work directory to read/write the desired Excel file
# if not using this cloud service.
# Locate the uploaded workbook: keep the last file in /cloud/project whose
# name contains "xls" (this also matches "xlsx"), as the original loop did.
filename <- NULL
for (f in list.files(path = "/cloud/project")) {
  if (str_detect(f, "xls")) {
    filename <- f
  }
}
if (is.null(filename)) {
  stop("No xls file detected")  # message typo fixed (was "detcted")
}

# Read and store the Excel spreadsheet.
SC <- read_excel(filename)
# Check whether a column named `col_name` exists in the data frame `ds`.
# Returns TRUE when the column is found; otherwise returns NULL invisibly,
# so callers can keep testing the result with is.null() (the original
# contract is preserved).
# Fix: the original looped over names(SC) -- a global -- instead of the
# `ds` argument, so the function silently ignored its own input.
PA_exist <- function(ds, col_name){
  if (col_name %in% names(ds)) {
    return(TRUE)
  }
  invisible(NULL)
}
# Validate the workbook layout before doing any work: both required columns
# must be present or the script aborts.
if(is.null(PA_exist(SC,"Property_Address")) == TRUE){
stop("No 'Property_Address' column detected")
}
# check if column 'Property_Type' exists as well
if(is.null(PA_exist(SC,"Property_Type")) == TRUE){
stop("No 'Property_Type' column detected")
}
# Drop rows with an empty value in the "Property_Address" column, then split
# the data into rows that already have a Property_Type and rows that still
# need one (the scraper only processes the latter).
SC <- subset(SC, !is.na(Property_Address))
dfF <- subset(SC, !is.na(Property_Type)) # df with already "Property_Type" values filled
dfNA <- subset(SC, is.na(Property_Type)) # df with "Property_Type" values yet to be filled
# Return TRUE if the text "Single Family" appears in any element of `Ads`;
# otherwise return NULL invisibly, so callers can keep testing the result
# with is.null() (the original contract is preserved).
# Fixes: the unreachable `break` after return() is removed, and the
# element-wise str_detect() loop is replaced with a single vectorised base
# grepl() call (fixed-string match, identical result on character input).
# This also tolerates NA elements and non-character input that coerces via
# as.character(), where the original loop would error.
S_Sing <- function(Ads){
  if (any(grepl("Single Family", Ads, fixed = TRUE))) {
    return(TRUE)
  }
  invisible(NULL)
}
# Look up each address in `Texts` on propertyshark.com and record the property
# type in the global `dfNA` (via <<-), matching results to rows by position --
# `Texts` is expected to be dfNA$Property_Address in row order.
# Sentinel values written: "_invalid input" (lookup failed), "_no single family"
# (multiple listings, none single-family), "_error" (unrecognised result page).
# NOTE(review): html_session()/submit_form()/set_values() were deprecated in
# rvest 1.0 in favour of session()/session_submit()/html_form_set() -- confirm
# which rvest version this project pins.
search <- function(Texts){
x = 0
for(j in Texts){
x = x + 1
# start a fresh browsing session at the search page
html_doc = html_session("https://www.propertyshark.com/mason/")
# fill form no. 1 with the address and county, then submit;
# nextpage is the resulting page
nextpage = submit_form(html_doc, set_values(html_form(html_doc)[[1]], search_token=j ,location='Santa Clara County, CA'))
# "Lookup" title => the search itself failed for this input
if(nextpage %>% html_nodes(xpath = '//title') %>% html_text() == "Lookup | PropertyShark"){
dfNA$Property_Type[x] <<- "_invalid input"
# "UI" title => multiple candidate listings were returned
}else if(nextpage %>% html_nodes(xpath = '//title') %>% html_text() == "UI | PropertyShark"){
# NOTE(review): S_Sing() is handed an xml_nodeset here, while the branch
# below extracts text with html_text() first -- confirm the nodes are
# coerced to text as intended before pattern matching.
if(is.null(S_Sing(nextpage %>% html_nodes(xpath = '//div[@class="description"]'))) == TRUE){
dfNA$Property_Type[x] <<- "_no single family"
}else {
dfNA$Property_Type[x] <<- "Single Family"
}
# "Property Information" title => we landed directly on a property page
}else if(str_detect(nextpage %>% html_nodes(xpath = '//title') %>% html_text(),"Property Information | PropertyShark")){
# if "Single Family" appears anywhere in the details block, use it verbatim
if(is.null(S_Sing(nextpage %>% html_nodes(xpath = '//div[@class="cols22"]') %>% html_text())) == FALSE){
dfNA$Property_Type[x] <<- "Single Family"
# otherwise parse the "<type> class" fragment out of the details text
}else {
dfNA$Property_Type[x] <<- str_trim(str_split(str_split((nextpage %>% html_nodes(xpath = '//div[@class="cols22"]') %>% html_text())[1], "\\(", simplify = T)[,1], " class\n", simplify = T)[,2])
}
}else {
dfNA$Property_Type[x] <<- "_error"
}
}
}
# Run the scraper; this fills dfNA$Property_Type in place via <<-.
search(dfNA$Property_Address)
# Stack the already-classified rows (dfF) on top of the freshly classified
# ones (dfNA).
SCNew <- rbind(dfF,dfNA)
# write the combined result to a new excel file
write_xlsx(SCNew, "dataoutput.xlsx")
|
# Load libraries ---------------------------------------------------------------
# NOTE(review): statgl (theme_statgl/scale_color_statgl used below) is not on
# CRAN -- confirm it is installed from its development repository.
library(ggbeeswarm)
library(tidyverse)
library(lubridate)
library(statgl)
library(plotly)
# Import data ------------------------------------------------------------------
# TidyTuesday 2020-01-14: the 500 most common passwords with crack-time data.
passwords_raw <-
readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-01-14/passwords.csv')
# Declare helper function ------------------------------------------------------
# Count the number of distinct characters in each element of a character
# vector. Returns an integer vector the same length as `string`.
# Rewritten with base R (strsplit + vapply) instead of the purrr/stringr
# pipeline: no extra dependencies and an identical result for character
# input, with a type-stable integer return.
num_uniques <- function(string) {
  vapply(
    strsplit(string, ""),
    function(chars) length(unique(chars)),
    integer(1)
  )
}
# Tidy/transform ---------------------------------------------------------------
# Build the analysis table: drop incomplete rows, fuse value + time_unit into a
# single human-readable crack_time string, then derive helper columns.
# NOTE(review): time_length() is given a character like "3.72 years"; it relies
# on lubridate coercing that string to a duration -- confirm this parses for
# every time_unit present in the raw data.
passwords <- passwords_raw %>%
drop_na() %>%
unite(crack_time, value, time_unit, sep = " ") %>%
mutate(crack_time_ = time_length(crack_time, unit = "second"),
pass_length = str_length(password),
pass_unique = num_uniques(password),
category = category %>%
str_replace_all("-", " ") %>%
str_to_title)
# Visualise --------------------------------------------------------------------
# Which categories are the most popular?
passwords %>%
count(category, sort = T)
# Beeswarm of password rank by category, most frequent category at the top;
# the `text` aesthetic only feeds the plotly tooltip below.
passwords %>%
mutate(category = fct_infreq(category) %>% fct_rev) %>%
ggplot(aes(x = category, y = rank, color = category, group = category,
text = paste("Password:", password, "\nRank:", rank))) +
geom_quasirandom(size = 0.75, width = 0.3) +
coord_flip() +
scale_y_reverse() +
theme_statgl() +
theme(legend.position = "none") +
scale_color_statgl() +
labs(title = "183 of the 500 most popular passwords are names",
x = "", y = "Rank") ->
password_rank
interactive_plot <-
ggplotly(password_rank, tooltip = "text")
interactive_plot
# Does password length == safe password?
qplot(log(crack_time_), data = passwords) # Approx log normal
qplot(strength, data = passwords, binwidth = 1)
qplot(pass_length, strength, data = passwords)
# Crack time vs password length, highlighting purely alphanumeric passwords.
# NOTE(review): y maps offline_crack_sec but the axis label says "online
# guessing" -- confirm which measure is intended.
passwords %>%
mutate(pass_length = factor(pass_length),
hi_there = format(crack_time, scientific = FALSE),
alpha_indicator = case_when(
category == "Simple Alphanumeric" ~ "Alphanumeric password",
T ~ "Literally anything else"
)) %>%
ggplot(aes(x = pass_length, y = offline_crack_sec,
color = alpha_indicator,
text = paste("Password:", password, "\nTime to crack:", crack_time)
)) +
geom_quasirandom(alpha = 0.8) +
scale_y_log10(labels = scales::comma_format(suffix = " sec")) +
theme_statgl() +
theme(legend.position = "bottom") +
labs(x = "Password length (characters)",
y = "Time to crack by online guessing",
color = "",
caption = "Log scale") ->
time_to_crack
ggplotly(time_to_crack, tooltip = "text")
# Crack time by category, ordered by typical crack time (longest at the top).
passwords %>%
mutate(category = fct_reorder(category, crack_time_) %>% fct_rev) %>%
ggplot(aes(x = category, y = log(crack_time_), color = category)) +
geom_quasirandom() +
coord_flip() +
theme_statgl()
|
/wk03_passwords/passwords.R
|
no_license
|
reksiandari/tidytuesday
|
R
| false
| false
| 3,051
|
r
|
# Load libraries ---------------------------------------------------------------
# NOTE(review): statgl (theme_statgl/scale_color_statgl used below) is not on
# CRAN -- confirm it is installed from its development repository.
library(ggbeeswarm)
library(tidyverse)
library(lubridate)
library(statgl)
library(plotly)
# Import data ------------------------------------------------------------------
# TidyTuesday 2020-01-14: the 500 most common passwords with crack-time data.
passwords_raw <-
readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-01-14/passwords.csv')
# Declare helper function ------------------------------------------------------
# Count the number of distinct characters in each element of a character
# vector. Returns an integer vector the same length as `string`.
# Rewritten with base R (strsplit + vapply) instead of the purrr/stringr
# pipeline: no extra dependencies and an identical result for character
# input, with a type-stable integer return.
num_uniques <- function(string) {
  vapply(
    strsplit(string, ""),
    function(chars) length(unique(chars)),
    integer(1)
  )
}
# Tidy/transform ---------------------------------------------------------------
# Build the analysis table: drop incomplete rows, fuse value + time_unit into a
# single human-readable crack_time string, then derive helper columns.
# NOTE(review): time_length() is given a character like "3.72 years"; it relies
# on lubridate coercing that string to a duration -- confirm this parses for
# every time_unit present in the raw data.
passwords <- passwords_raw %>%
drop_na() %>%
unite(crack_time, value, time_unit, sep = " ") %>%
mutate(crack_time_ = time_length(crack_time, unit = "second"),
pass_length = str_length(password),
pass_unique = num_uniques(password),
category = category %>%
str_replace_all("-", " ") %>%
str_to_title)
# Visualise --------------------------------------------------------------------
# Which categories are the most popular?
passwords %>%
count(category, sort = T)
# Beeswarm of password rank by category, most frequent category at the top;
# the `text` aesthetic only feeds the plotly tooltip below.
passwords %>%
mutate(category = fct_infreq(category) %>% fct_rev) %>%
ggplot(aes(x = category, y = rank, color = category, group = category,
text = paste("Password:", password, "\nRank:", rank))) +
geom_quasirandom(size = 0.75, width = 0.3) +
coord_flip() +
scale_y_reverse() +
theme_statgl() +
theme(legend.position = "none") +
scale_color_statgl() +
labs(title = "183 of the 500 most popular passwords are names",
x = "", y = "Rank") ->
password_rank
interactive_plot <-
ggplotly(password_rank, tooltip = "text")
interactive_plot
# Does password length == safe password?
qplot(log(crack_time_), data = passwords) # Approx log normal
qplot(strength, data = passwords, binwidth = 1)
qplot(pass_length, strength, data = passwords)
# Crack time vs password length, highlighting purely alphanumeric passwords.
# NOTE(review): y maps offline_crack_sec but the axis label says "online
# guessing" -- confirm which measure is intended.
passwords %>%
mutate(pass_length = factor(pass_length),
hi_there = format(crack_time, scientific = FALSE),
alpha_indicator = case_when(
category == "Simple Alphanumeric" ~ "Alphanumeric password",
T ~ "Literally anything else"
)) %>%
ggplot(aes(x = pass_length, y = offline_crack_sec,
color = alpha_indicator,
text = paste("Password:", password, "\nTime to crack:", crack_time)
)) +
geom_quasirandom(alpha = 0.8) +
scale_y_log10(labels = scales::comma_format(suffix = " sec")) +
theme_statgl() +
theme(legend.position = "bottom") +
labs(x = "Password length (characters)",
y = "Time to crack by online guessing",
color = "",
caption = "Log scale") ->
time_to_crack
ggplotly(time_to_crack, tooltip = "text")
# Crack time by category, ordered by typical crack time (longest at the top).
passwords %>%
mutate(category = fct_reorder(category, crack_time_) %>% fct_rev) %>%
ggplot(aes(x = category, y = log(crack_time_), color = category)) +
geom_quasirandom() +
coord_flip() +
theme_statgl()
|
# Build a tidy summary of the UCI HAR dataset: merges the test and train
# splits, keeps only mean/std measurement columns, labels activities, and
# assigns the per-subject/per-activity means to the global `mean_tidy_data`.
# NOTE(review): the argument `x` is never used inside the function.
# NOTE(review): setwd() is called three times and never restored, so the
# working directory changes as a side effect of calling this function.
run_analysis <- function(x){
#Variables used in this function:
#features and features2 - list of feature labels for the tidy data set, and a vectorized version
#activities and activities2 - list of activity labels for the tidy data, and a vectorized version
#subject_test, x_test, y_test - data tables from the testing data for the experiment
#subject_train, x_train, y_train - data tables from the training data for the experiment
#testing1 and training1 - combined data for the testing set and the training set respectively
#complete - merged training and testing data
#complete2 - filtered data set that only includes columns that contain "mean" or "std" in the colname
#tidy_data - data set with the Subject, Activity, and filtered observations from complete2
#tidy_data2 - tidy_data data set grouped by Subject and Activity
#mean_tidy_data - summarized tidy_data2, the mean for each Activity by Subject
#sets the working directory to the correct folder
setwd("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset")
#reads in the features file
features<- read.table("features.txt")
#creates a vector of the feature labels
features2<- c(as.character(features$V2))
#reads in activity names
activities <- read.table("activity_labels.txt")
#creates a vector of the activity labels
activities2 <- c(as.character(activities$V2))
#changed wd to "test"
setwd("./test")
#creates a complete data set from the testing data
#NOTE(review): testing1 is assigned with <<-, so it lands in the enclosing
#environment rather than this helper's scope.
create_testset <-function(){
#next three lines read in test data
subject_test<-read.table("subject_test.txt")
x_test<- read.table("X_test.txt")
y_test<- read.table("y_test.txt")
#binds subject_test, y_test, and x_test together
testing1<<-cbind(subject_test,y_test,x_test)
}
create_testset()
#renames testing1 columns (columns 1-2 are the subject and activity codes)
for (i in 3:ncol(testing1)){
names(testing1)[i]<-features2[i-2]
}
#changes wd to "train"
setwd("../train")
#creates a complete data set from the training data
create_trainset<-function(){
#next three lines read in the training data
subject_train<- read.table("subject_train.txt")
x_train<-read.table("X_train.txt")
y_train<-read.table("y_train.txt")
#binds the subject label to the observations
training1<<-cbind(subject_train,y_train,x_train)
}
create_trainset()
#renames training1 columns the same way
for (i in 3:ncol(training1)){
names(training1)[i]<-features2[i-2]
}
#binds training and testing data row-wise
complete<- rbind(training1,testing1)
#uses grep to find only columns with "mean" or "std" in the colnames
complete2 <- complete[,c(colnames(complete)[grep("mean|std",colnames(complete))])]
#binds complete2 to the subject and activity columns
tidy_data <- cbind(complete[,1:2],complete2)
#renames columns 1 and 2 to "Subject" and "Activity"
names(tidy_data)[1:2]<- c("Subject","Activity")
#replaces each numeric activity code with its label from activities2
for (i in 1:nrow(tidy_data)){
tidy_data$Activity[i] <- activities2[as.numeric(tidy_data$Activity[i])]
}
#loads dplyr library
library(dplyr)
#groups the tidy_data by Subject and Activity
tidy_data2<- group_by(tidy_data, Subject, Activity)
#summarizes tidy_data2: the mean of every measurement per subject/activity.
#NOTE(review): summarize_each()/funs() are deprecated in current dplyr --
#summarise(across(everything(), mean)) is the modern equivalent.
mean_tidy_data<<- summarize_each(tidy_data2, funs(mean))
}
|
/run_analysis.R
|
no_license
|
simonswes/GandCData_CourseProject
|
R
| false
| false
| 3,654
|
r
|
# Build a tidy summary of the UCI HAR dataset: merges the test and train
# splits, keeps only mean/std measurement columns, labels activities, and
# assigns the per-subject/per-activity means to the global `mean_tidy_data`.
# NOTE(review): the argument `x` is never used inside the function.
# NOTE(review): setwd() is called three times and never restored, so the
# working directory changes as a side effect of calling this function.
run_analysis <- function(x){
#Variables used in this function:
#features and features2 - list of feature labels for the tidy data set, and a vectorized version
#activities and activities2 - list of activity labels for the tidy data, and a vectorized version
#subject_test, x_test, y_test - data tables from the testing data for the experiment
#subject_train, x_train, y_train - data tables from the training data for the experiment
#testing1 and training1 - combined data for the testing set and the training set respectively
#complete - merged training and testing data
#complete2 - filtered data set that only includes columns that contain "mean" or "std" in the colname
#tidy_data - data set with the Subject, Activity, and filtered observations from complete2
#tidy_data2 - tidy_data data set grouped by Subject and Activity
#mean_tidy_data - summarized tidy_data2, the mean for each Activity by Subject
#sets the working directory to the correct folder
setwd("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset")
#reads in the features file
features<- read.table("features.txt")
#creates a vector of the feature labels
features2<- c(as.character(features$V2))
#reads in activity names
activities <- read.table("activity_labels.txt")
#creates a vector of the activity labels
activities2 <- c(as.character(activities$V2))
#changed wd to "test"
setwd("./test")
#creates a complete data set from the testing data
#NOTE(review): testing1 is assigned with <<-, so it lands in the enclosing
#environment rather than this helper's scope.
create_testset <-function(){
#next three lines read in test data
subject_test<-read.table("subject_test.txt")
x_test<- read.table("X_test.txt")
y_test<- read.table("y_test.txt")
#binds subject_test, y_test, and x_test together
testing1<<-cbind(subject_test,y_test,x_test)
}
create_testset()
#renames testing1 columns (columns 1-2 are the subject and activity codes)
for (i in 3:ncol(testing1)){
names(testing1)[i]<-features2[i-2]
}
#changes wd to "train"
setwd("../train")
#creates a complete data set from the training data
create_trainset<-function(){
#next three lines read in the training data
subject_train<- read.table("subject_train.txt")
x_train<-read.table("X_train.txt")
y_train<-read.table("y_train.txt")
#binds the subject label to the observations
training1<<-cbind(subject_train,y_train,x_train)
}
create_trainset()
#renames training1 columns the same way
for (i in 3:ncol(training1)){
names(training1)[i]<-features2[i-2]
}
#binds training and testing data row-wise
complete<- rbind(training1,testing1)
#uses grep to find only columns with "mean" or "std" in the colnames
complete2 <- complete[,c(colnames(complete)[grep("mean|std",colnames(complete))])]
#binds complete2 to the subject and activity columns
tidy_data <- cbind(complete[,1:2],complete2)
#renames columns 1 and 2 to "Subject" and "Activity"
names(tidy_data)[1:2]<- c("Subject","Activity")
#replaces each numeric activity code with its label from activities2
for (i in 1:nrow(tidy_data)){
tidy_data$Activity[i] <- activities2[as.numeric(tidy_data$Activity[i])]
}
#loads dplyr library
library(dplyr)
#groups the tidy_data by Subject and Activity
tidy_data2<- group_by(tidy_data, Subject, Activity)
#summarizes tidy_data2: the mean of every measurement per subject/activity.
#NOTE(review): summarize_each()/funs() are deprecated in current dplyr --
#summarise(across(everything(), mean)) is the modern equivalent.
mean_tidy_data<<- summarize_each(tidy_data2, funs(mean))
}
|
#' Switch every Lumix camera in `cameras` into recording mode.
#'
#' Issues the rec-mode CGI command to each camera host in turn; the HTTP
#' responses are discarded and NULL is returned.
#' @export
initCams <- function(cameras) {
  for (host in cameras) {
    RCurl::getURL(paste0("http://", host, "/cam.cgi?mode=camcmd&value=recmode"))
  }
  NULL
}
#' Trigger an image capture on every Lumix camera in `cameras`.
#'
#' Sends the capture CGI command to each camera host in turn; the HTTP
#' responses are discarded and NULL is returned.
#' @export
grabPictures <- function(cameras) {
  for (host in cameras) {
    RCurl::getURL(paste0("http://", host, "/cam.cgi?mode=camcmd&value=capture"))
  }
  NULL
}
|
/R/camera.R
|
no_license
|
swarm-lab/observRlumix
|
R
| false
| false
| 342
|
r
|
#' Switch every Lumix camera in `cameras` into recording mode.
#'
#' Issues the rec-mode CGI command to each camera host in turn; the HTTP
#' responses are discarded and NULL is returned.
#' @export
initCams <- function(cameras) {
  for (host in cameras) {
    RCurl::getURL(paste0("http://", host, "/cam.cgi?mode=camcmd&value=recmode"))
  }
  NULL
}
#' Trigger an image capture on every Lumix camera in `cameras`.
#'
#' Sends the capture CGI command to each camera host in turn; the HTTP
#' responses are discarded and NULL is returned.
#' @export
grabPictures <- function(cameras) {
  for (host in cameras) {
    RCurl::getURL(paste0("http://", host, "/cam.cgi?mode=camcmd&value=capture"))
  }
  NULL
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{check_depreciated_args}
\alias{check_depreciated_args}
\title{Check deprecated arguments}
\usage{
check_depreciated_args(blacklist = NULL, ...)
}
\arguments{
\item{blacklist}{A character vector of variable names.}
\item{...}{A list of arguments for checking.}
}
\description{
Check deprecated arguments
}
|
/man/check_depreciated_args.Rd
|
permissive
|
sailfish009/proteoQ
|
R
| false
| true
| 400
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{check_depreciated_args}
\alias{check_depreciated_args}
\title{Check deprecated arguments}
\usage{
check_depreciated_args(blacklist = NULL, ...)
}
\arguments{
\item{blacklist}{A character vector of variable names.}
\item{...}{A list of arguments for checking.}
}
\description{
check depreciated arguments
}
|
# String concatenation demo: "+" is not defined for character vectors in R,
# so paste()/paste0() are used instead.
x = "Foo"
y = "Bar"
# x + y
# Error in x + y : non-numeric argument to binary operator
# Execution halted
# sep = "" joins with no separator (paste0() is the shorthand for this)
z = paste(x, y, sep="")
z # FooBar
# paste() returns ONE string, not a character-per-element vector
length(z) # 1
# Join elements of a vector
# collapse = "-" flattens a whole vector into a single dash-separated string
fruits = c("Apple", "Banana")
q = paste(fruits, collapse ="-")
q # Apple-Banana
# join elements of a numeric vector
# numbers are coerced to character automatically before joining
numbers = c(2, 3, 4)
class(numbers) # numeric
nums = paste(numbers, collapse ="-")
nums # 2-3-4
|
/r/examples/basics/concatenate_strings.R
|
no_license
|
szabgab/slides
|
R
| false
| false
| 413
|
r
|
# String concatenation demo: "+" is not defined for character vectors in R,
# so paste()/paste0() are used instead.
x = "Foo"
y = "Bar"
# x + y
# Error in x + y : non-numeric argument to binary operator
# Execution halted
# sep = "" joins with no separator (paste0() is the shorthand for this)
z = paste(x, y, sep="")
z # FooBar
# paste() returns ONE string, not a character-per-element vector
length(z) # 1
# Join elements of a vector
# collapse = "-" flattens a whole vector into a single dash-separated string
fruits = c("Apple", "Banana")
q = paste(fruits, collapse ="-")
q # Apple-Banana
# join elements of a numeric vector
# numbers are coerced to character automatically before joining
numbers = c(2, 3, 4)
class(numbers) # numeric
nums = paste(numbers, collapse ="-")
nums # 2-3-4
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmph3UnEb/file63ef10b78e6c
\name{nimbleFunction}
\alias{nimbleFunction}
\title{create a nimbleFunction}
\usage{
nimbleFunction(setup = NULL, run = function() { }, methods = list(),
globalSetup = NULL, contains = NULL, name = NA,
where = getNimbleFunctionEnvironment())
}
\arguments{
\item{setup}{An optional R function definition for setup processing.}
\item{run}{An optional NIMBLE function definition that executes the primary job of the nimbleFunction}
\item{methods}{An optional named list of NIMBLE function definitions for other class methods.}
\item{globalSetup}{For internal use only}
\item{contains}{An optional object returned from \link{nimbleFunctionVirtual} that defines arguments and returnTypes for \code{run} and/or methods, to which the current nimbleFunction must conform}
\item{name}{An optional name used internally, for example in generated C++ code. Usually this is left blank and NIMBLE provides a name.}
\item{where}{An optional \code{where} argument passed to \code{setRefClass} for where the reference class definition generated for this nimbleFunction will be stored. This is needed due to R package namespace issues but should never need to be provided by a user.}
}
\description{
create a nimbleFunction from a setup function, run function, possibly other methods, and possibly inheritance via \code{contains}
}
\details{
This is the main function for defining nimbleFunctions. A lot of information is provided in the NIMBLE User Manual, so only a brief summary will be given here.
If a \code{setup} function is provided, then \code{nimbleFunction} returns a generator: a function that when called with arguments for the setup function will execute that function and return a specialized nimbleFunction. The \code{run} and other methods can be called using \code{$} like in other R classes, e.g. \code{nf$run()}. The methods can use objects that were created in or passed to the \code{setup} function.
If no \code{setup} function is provided, then \code{nimbleFunction} returns a function that executes the \code{run} function. It is not a generator in this case, and no other \code{methods} can be provided.
If one wants a generator but does not need any setup arguments or code, \code{setup = TRUE} can be used.
See the NIMBLE User Manual for examples.
For more information about the \code{contains} argument, see the section on nimbleFunctionLists.
}
\author{
NIMBLE development team
}
|
/packages/nimble/man/nimbleFunction.Rd
|
no_license
|
nxdao2000/nimble
|
R
| false
| false
| 2,524
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmph3UnEb/file63ef10b78e6c
\name{nimbleFunction}
\alias{nimbleFunction}
\title{create a nimbleFunction}
\usage{
nimbleFunction(setup = NULL, run = function() { }, methods = list(),
globalSetup = NULL, contains = NULL, name = NA,
where = getNimbleFunctionEnvironment())
}
\arguments{
\item{setup}{An optional R function definition for setup processing.}
\item{run}{An optional NIMBLE function definition that executes the primary job of the nimbleFunction}
\item{methods}{An optional named list of NIMBLE function definitions for other class methods.}
\item{globalSetup}{For internal use only}
\item{contains}{An optional object returned from \link{nimbleFunctionVirtual} that defines arguments and returnTypes for \code{run} and/or methods, to which the current nimbleFunction must conform}
\item{name}{An optional name used internally, for example in generated C++ code. Usually this is left blank and NIMBLE provides a name.}
\item{where}{An optional \code{where} argument passed to \code{setRefClass} for where the reference class definition generated for this nimbleFunction will be stored. This is needed due to R package namespace issues but should never need to be provided by a user.}
}
\description{
create a nimbleFunction from a setup function, run function, possibly other methods, and possibly inheritance via \code{contains}
}
\details{
This is the main function for defining nimbleFunctions. A lot of information is provided in the NIMBLE User Manual, so only a brief summary will be given here.
If a \code{setup} function is provided, then \code{nimbleFunction} returns a generator: a function that when called with arguments for the setup function will execute that function and return a specialized nimbleFunction. The \code{run} and other methods can be called using \code{$} like in other R classes, e.g. \code{nf$run()}. The methods can use objects that were created in or passed to the \code{setup} function.
If no \code{setup} function is provided, then \code{nimbleFunction} returns a function that executes the \code{run} function. It is not a generator in this case, and no other \code{methods} can be provided.
If one wants a generator but does not need any setup arguments or code, \code{setup = TRUE} can be used.
See the NIMBLE User Manual for examples.
For more information about the \code{contains} argument, see the section on nimbleFunctionLists.
}
\author{
NIMBLE development team
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{end_year_population_totals_long}
\alias{end_year_population_totals_long}
\title{End-year stock population figures for forcibly displaced and stateless persons - Long Format}
\format{
A data frame with 153809 rows and 7 variables:
\describe{
\item{\code{Year}}{character Year}
\item{\code{CountryOriginCode}}{character Country of Origin Code isoA3}
\item{\code{CountryAsylumCode}}{character Country of Asylum Code isoA3}
\item{\code{CountryOriginName}}{character Country of Origin Name}
\item{\code{CountryAsylumName}}{character Country of Asylum Name}
\item{\code{Solution.type}}{character Type of Solution }
\item{\code{Value}}{integer Number of persons}
}
}
\source{
\url{https://data.humdata.org/dataset/unhcr-population-data-for-world}
}
\usage{
end_year_population_totals_long
}
\description{
Data collated by UNHCR, containing end-year stock population figures for forcibly displaced and stateless persons residing in World. Data is available since 1951.
}
\keyword{datasets}
|
/man/end_year_population_totals_long.Rd
|
no_license
|
Naskov/unhcrdatapackage
|
R
| false
| true
| 1,112
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{end_year_population_totals_long}
\alias{end_year_population_totals_long}
\title{End-year stock population figures for forcibly displaced and stateless persons - Long Format}
\format{
A data frame with 153809 rows and 7 variables:
\describe{
\item{\code{Year}}{character Year}
\item{\code{CountryOriginCode}}{character Country of Origin Code isoA3}
\item{\code{CountryAsylumCode}}{character Country of Asylum Code isoA3}
\item{\code{CountryOriginName}}{character Country of Origin Name}
\item{\code{CountryAsylumName}}{character Country of Asylum Name}
\item{\code{Solution.type}}{character Type of Solution }
\item{\code{Value}}{integer Number of persons}
}
}
\source{
\url{https://data.humdata.org/dataset/unhcr-population-data-for-world}
}
\usage{
end_year_population_totals_long
}
\description{
Data collated by UNHCR, containing end-year stock population figures for forcibly displaced and stateless persons residing in World. Data is available since 1951.
}
\keyword{datasets}
|
## Manoela 16 November 2020
## Script to arrange VIIRS data + land cover classes + Protected Areas + Land tenure + States and Municipalities + Precipitation + Immediate regions
## Data:
## VIIRS_LCC and Defo
## VIIRS LT
## VIIRS PA
## VIIRS States and Municipalities
## VIIRS immediate regions (24Sep)
## updating on 07 Jan with 2020 data
#########################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace, and require() is
# used where library() would fail loudly on a missing package -- both are
# discouraged in scripts meant to be sourced.
rm(list=ls())
require("tidyverse")
require("dplyr")
require("sf")
require("ggplot2")
require("lubridate")
require("plotly")
require("formattable")
# MapBiomas land-cover class codes: keep only the English class name and its
# numeric code, splitting "NN. Name" labels on the dot-space separator.
MBClassCode <- read_csv('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5/MapBiomasClassesCODE_forPhase6.csv')
MBClassCode <- MBClassCode %>% select(-X5) %>% select(LCC_Eng, LCC_value) %>%
separate(LCC_Eng, sep= '\\. ', into=c('del', 'LCC_name'), fill='left') %>%
select(-del) %>% rename(LandCoverClassNumber = LCC_value)
# Atlas Agropecuario land-tenure class codes, renamed to match the join keys
# used further down in the script.
AtlasAgroClassCode <- read_csv('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5/LandTenureATLASClassesCODE.csv')
AtlasAgroClassCode <- AtlasAgroClassCode %>% select(LandTenureClassNo, ClassEng) %>%
rename(LandTenureClassNumber = LandTenureClassNo) %>%
rename(LT_name = ClassEng)
## loop for all , done that. today only 2020
# NOTE(review): `ld[13]` is evaluated on the line BEFORE `ld` is defined --
# this errors in a fresh session and looks like leftover interactive code.
i <- 13
ld[13] # start in 5
ld <- list.dirs('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5')
for(i in 5:(length(ld))){
y <- unlist(strsplit(ld[i], split = "Year"))[2]
cat("Year: ", y, "...")
#### VIIRS_LCC and Defo ####
flLCC <- list.files(path = ld[i], pattern = "LCC", full.names = T)
flLCC <- grep(pattern = ".shp", flLCC, value = T)
if(length(flLCC)==0) next()
VIIRS_LCC <- read_sf(flLCC)
head(VIIRS_LCC)
## create var LATLONG and unique ID
VIIRS_LCC <- VIIRS_LCC %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME))
range(VIIRS_LCC$DATE)
## filter duplicates out (created by GEE)
VIIRS_LCC_NoDups <- VIIRS_LCC %>% dplyr::filter(!duplicated(UniqueID))
## rename var 'first'
VIIRS_LCC_NoDups <- VIIRS_LCC_NoDups %>% rename(LandCoverClassNumber = first)
## join land cover classes number with names from MapBiomas
VIIRS_LCC_NoDups_MBcode <- VIIRS_LCC_NoDups %>% dplyr::left_join(MBClassCode, by='LandCoverClassNumber')
VIIRS_LCC_NoDups_MBcode$LCC_name <- as.factor(VIIRS_LCC_NoDups_MBcode$LCC_name)
#### VIIRS LT ####
## LT code from Atlas
flLT <- list.files(path = ld[i], pattern = "LT", full.names = T)
flLT <- grep(pattern = ".shp", flLT, value = T)
VIIRS_LT <- read_sf(flLT)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_LT <- VIIRS_LT %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>% select(UniqueID, first)
VIIRS_LT_NoDups <- VIIRS_LT %>% dplyr::filter(!duplicated(UniqueID))
## rename var 'first'
VIIRS_LT_NoDups <- VIIRS_LT_NoDups %>% rename(LandTenureClassNumber = first)
## join land cover classes number with names from MapBiomas
VIIRS_LT_NoDups_Atlascode <- VIIRS_LT_NoDups %>% dplyr::left_join(AtlasAgroClassCode, by='LandTenureClassNumber')
VIIRS_LT_NoDups_Atlascode$LT_name <- as.factor(VIIRS_LT_NoDups_Atlascode$LT_name)
#### VIIRS PA ####
flPA <- list.files(path = ld[i], pattern = "PA", full.names = T)
flPA <- grep(pattern = ".shp", flPA, value = T)
VIIRS_PA <- read_sf(flPA)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_PA <- VIIRS_PA %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
select(UniqueID, DESIG_ENG, NAME, IUCN_CAT, REP_AREA)
## filter duplicates out (created by GEE)
VIIRS_PA_NoDups <- VIIRS_PA %>% dplyr::filter(!duplicated(UniqueID))
## Name areas with NA with outside PA
## 'name'
VIIRS_PA_NoDups$NAME[is.na(VIIRS_PA_NoDups$NAME)] <- 'OutsidePA'
VIIRS_PA_NoDups$NAME <- as.factor(VIIRS_PA_NoDups$NAME)
## 'desig'
VIIRS_PA_NoDups$DESIG_ENG[is.na(VIIRS_PA_NoDups$DESIG_ENG)] <- 'OutsidePA'
VIIRS_PA_NoDups$DESIG_ENG <- as.factor(VIIRS_PA_NoDups$DESIG_ENG)
## Fix names acentos
s <- as.character(VIIRS_PA_NoDups$NAME) ##
Encoding(s) <- "latin1"
VIIRS_PA_NoDups$NAME <- iconv(s,from="latin1",to="ASCII//TRANSLIT") ## ok
VIIRS_PA_NoDups$NAME <- as.factor(VIIRS_PA_NoDups$NAME)
VIIRS_PA_NoDups$DESIG_ENG <- as.factor(VIIRS_PA_NoDups$DESIG_ENG)
#### VIIRS States and Municipalities ####
flStMun <- list.files(path = ld[i], pattern = "States_Municipalities", full.names = T)
flStMun <- grep(pattern = ".shp", flStMun, value = T)
VIIRS_StMun <- read_sf(flStMun)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_StMun <- VIIRS_StMun %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
select(UniqueID, NM_ESTADO, nm_municip)
## filter duplicates out (created by GEE, but just in case)
VIIRS_StMun_NoDups <- VIIRS_StMun %>% dplyr::filter(!duplicated(UniqueID))
## Fix names acentos
# state
u <- as.character(VIIRS_StMun$NM_ESTADO)
Encoding(u) <- "latin1"
VIIRS_StMun$NM_ESTADO <- iconv(u,from="latin1",to="ASCII//TRANSLIT")
VIIRS_StMun$NM_ESTADO <- as.factor(VIIRS_StMun$NM_ESTADO)
# municip
t <- as.character(VIIRS_StMun$nm_municip)
Encoding(t) <- "latin1"
VIIRS_StMun$nm_municip <- iconv(t,from="latin1",to="ASCII//TRANSLIT")
VIIRS_StMun$nm_municip <- as.factor(VIIRS_StMun$nm_municip)
#### VIIRS immediate regions ####
flImmedReg <- list.files(path = ld[i], pattern = "Regions", full.names = T)
flImmedReg <- grep(pattern = ".shp", flImmedReg, value = T)
VIIRS_ImmedReg <- read_sf(flImmedReg)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_ImmedReg <- VIIRS_ImmedReg %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
select(UniqueID, nome_rgint, nome_rgi)
## Fix names acentos
# intermediate regions (mesoregions)
v <- as.character(VIIRS_ImmedReg$nome_rgint)
VIIRS_ImmedReg$nome_rgint <- iconv(v,from="latin1",to="ASCII//TRANSLIT")
VIIRS_ImmedReg$nome_rgint <- as.factor(VIIRS_ImmedReg$nome_rgint) #
unique(VIIRS_ImmedReg$nome_rgint) # 29 mesoregions
## immediate regions
w <- as.character(VIIRS_ImmedReg$nome_rgi)
VIIRS_ImmedReg$nome_rgi <- iconv(w,from="latin1",to="ASCII//TRANSLIT")
VIIRS_ImmedReg$nome_rgi <- as.factor(VIIRS_ImmedReg$nome_rgi) #
unique(VIIRS_ImmedReg$nome_rgi) # 83 immediate regions
#### save as sf ####
VIIRS_LCC_NoDups_MBcode_tb <- as_tibble(VIIRS_LCC_NoDups_MBcode)
VIIRS_LT_NoDups_Atlascode_tb <- as_tibble(VIIRS_LT_NoDups_Atlascode) %>% select(-geometry)
VIIRS_PA_NoDups_tb <- as_tibble(VIIRS_PA_NoDups) %>% select(-geometry)
VIIRS_StMun_tb <- as_tibble(VIIRS_StMun) %>% select(-geometry)
VIIRS_ImmedReg_tb <- as_tibble(VIIRS_ImmedReg) %>% select(-geometry)
## remove: VIIRS_Chirps_tb <- as_tibble(VIIRS_Chirps) %>% select(-geometry)
## join all ####
VIIRS_LCC_LT_PA_StMun_ImmedReg <- VIIRS_LCC_NoDups_MBcode_tb %>%
left_join(VIIRS_LT_NoDups_Atlascode_tb, by='UniqueID') %>%
left_join(VIIRS_PA_NoDups_tb, by='UniqueID') %>%
left_join(VIIRS_StMun_tb, by='UniqueID') %>%
left_join(VIIRS_ImmedReg_tb, by='UniqueID')
## SF
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf <- st_as_sf(VIIRS_LCC_LT_PA_StMun_ImmedReg)
## arranging the whole dataset
## Create Months names ####
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month <- NA
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("01")] <- 'Jan'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("02")] <- 'Feb'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("03")] <- 'Mar'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("04")] <- 'Apr'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("05")] <- 'May'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("06")] <- 'Jun'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("07")] <- 'Jul'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("08")] <- 'Aug'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("09")] <- 'Sep'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("10")] <- 'Oct'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("11")] <- 'Nov'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("12")] <- 'Dec'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month <- as.factor(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month)
##
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ <- NA
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Ecological Station','Biological Reserve','Park','Wildlife Refuge',
'World Heritage Site (natural or mixed)')] <- 'Strictly Protected'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Environmental Protection Area','Area of Relevant Ecological Interest',
'Forest','Extractive Reserve','Sustainable Development Reserve',
'Ramsar Site, Wetland of International Importance')] <- 'Sustainable Use'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Indigenous Area','Indigenous Reserve')] <- 'Indigenous Territory'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('OutsidePA')] <- 'Not Protected'
#
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ <- as.factor(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ)
## save
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf <- VIIRS_LCC_LT_PA_StMun_ImmedReg_sf %>% select(-ACQ_DATE)
write_sf(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf,
dsn = paste0("~/Oxford/FireAmazon2019_GCRF/DataFromR/Phase6_Climatic/VIIRS_LCC_LT_PA_StMun_ImmedReg_AllYearsDats/VIIRS_LCC_LT_PA_StMun_ImmedReg_",y,"_sf.shp"),
layer = paste0("VIIRS_LCC_LT_PA_StMun_ImmedReg_",y,"_sf"), driver = "ESRI Shapefile")
cat("\n")
}
|
/1_AllDataManag_VIIRS_all_years_separately.R
|
no_license
|
ManoelaMachadoEco/EmergencyPoliciesFireAmazon
|
R
| false
| false
| 11,201
|
r
|
## Manoela 16 November 2020
## Script to arrange VIIRS data + land cover classes + Protected Areas + Land tenure + States and Municipalities + Precipitation + Immediate regions
## Data:
## VIIRS_LCC and Defo
## VIIRS LT
## VIIRS PA
## VIIRS States and Municipalities
## VIIRS immediate regions (24Sep)
## updating on 07 Jan with 2020 data
#########################################
## NOTE(review): rm(list=ls()) wipes the caller's workspace when this script
## is sourced; prefer running in a fresh R session instead.
rm(list=ls())
## NOTE(review): require() returns FALSE silently when a package is missing;
## library() would fail fast and is preferable for hard dependencies.
require("tidyverse")
require("dplyr")
require("sf")
require("ggplot2")
require("lubridate")
require("plotly")
require("formattable")
## Lookup table: MapBiomas land-cover class number -> English class name.
## `X5` is an unnamed column auto-created by read_csv; `LCC_Eng` is of the
## form "NN. Class name", so it is split on ". " and the numeric prefix dropped.
MBClassCode <- read_csv('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5/MapBiomasClassesCODE_forPhase6.csv')
MBClassCode <- MBClassCode %>% select(-X5) %>% select(LCC_Eng, LCC_value) %>%
  separate(LCC_Eng, sep= '\\. ', into=c('del', 'LCC_name'), fill='left') %>%
  select(-del) %>% rename(LandCoverClassNumber = LCC_value)
## Lookup table: Atlas land-tenure class number -> English class name.
AtlasAgroClassCode <- read_csv('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5/LandTenureATLASClassesCODE.csv')
AtlasAgroClassCode <- AtlasAgroClassCode %>% select(LandTenureClassNo, ClassEng) %>%
  rename(LandTenureClassNumber = LandTenureClassNo) %>%
  rename(LT_name = ClassEng)
## loop for all , done that. today only 2020
## BUG FIX: the original inspected ld[13] BEFORE ld was created, which
## errors with "object 'ld' not found" in a fresh session. Build the list
## of year directories first, then inspect the entry of interest.
ld <- list.dirs('~/Oxford/FireAmazon2019_GCRF/DataFromGEE/Phase6_MBcoll5')
i <- 13
ld[13] # start in 5
## For each year directory: read the per-fire-detection shapefiles exported
## from GEE (land cover, land tenure, protected areas, states/municipalities,
## immediate regions), build a shared UniqueID (lat_long_date_time), join all
## layers on UniqueID, derive Month and PA category, and write one shapefile
## per year.
for(i in 5:(length(ld))){
y <- unlist(strsplit(ld[i], split = "Year"))[2]
cat("Year: ", y, "...")
#### VIIRS_LCC and Defo ####
flLCC <- list.files(path = ld[i], pattern = "LCC", full.names = T)
flLCC <- grep(pattern = ".shp", flLCC, value = T)
if(length(flLCC)==0) next()
VIIRS_LCC <- read_sf(flLCC)
head(VIIRS_LCC)
## create var LATLONG and unique ID
VIIRS_LCC <- VIIRS_LCC %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
  dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
  dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME))
range(VIIRS_LCC$DATE)
## filter duplicates out (created by GEE)
VIIRS_LCC_NoDups <- VIIRS_LCC %>% dplyr::filter(!duplicated(UniqueID))
## rename var 'first' (GEE names the sampled band 'first')
VIIRS_LCC_NoDups <- VIIRS_LCC_NoDups %>% rename(LandCoverClassNumber = first)
## join land cover classes number with names from MapBiomas
VIIRS_LCC_NoDups_MBcode <- VIIRS_LCC_NoDups %>% dplyr::left_join(MBClassCode, by='LandCoverClassNumber')
VIIRS_LCC_NoDups_MBcode$LCC_name <- as.factor(VIIRS_LCC_NoDups_MBcode$LCC_name)
#### VIIRS LT ####
## LT code from Atlas
flLT <- list.files(path = ld[i], pattern = "LT", full.names = T)
flLT <- grep(pattern = ".shp", flLT, value = T)
VIIRS_LT <- read_sf(flLT)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_LT <- VIIRS_LT %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
  dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
  dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>% select(UniqueID, first)
VIIRS_LT_NoDups <- VIIRS_LT %>% dplyr::filter(!duplicated(UniqueID))
## rename var 'first'
VIIRS_LT_NoDups <- VIIRS_LT_NoDups %>% rename(LandTenureClassNumber = first)
## join land tenure class number with names from the Atlas table
VIIRS_LT_NoDups_Atlascode <- VIIRS_LT_NoDups %>% dplyr::left_join(AtlasAgroClassCode, by='LandTenureClassNumber')
VIIRS_LT_NoDups_Atlascode$LT_name <- as.factor(VIIRS_LT_NoDups_Atlascode$LT_name)
#### VIIRS PA ####
flPA <- list.files(path = ld[i], pattern = "PA", full.names = T)
flPA <- grep(pattern = ".shp", flPA, value = T)
VIIRS_PA <- read_sf(flPA)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_PA <- VIIRS_PA %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
  dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
  dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
  select(UniqueID, DESIG_ENG, NAME, IUCN_CAT, REP_AREA)
## filter duplicates out (created by GEE)
VIIRS_PA_NoDups <- VIIRS_PA %>% dplyr::filter(!duplicated(UniqueID))
## Detections outside any protected area have NA: recode as 'OutsidePA'
## 'name'
VIIRS_PA_NoDups$NAME[is.na(VIIRS_PA_NoDups$NAME)] <- 'OutsidePA'
VIIRS_PA_NoDups$NAME <- as.factor(VIIRS_PA_NoDups$NAME)
## 'desig'
VIIRS_PA_NoDups$DESIG_ENG[is.na(VIIRS_PA_NoDups$DESIG_ENG)] <- 'OutsidePA'
VIIRS_PA_NoDups$DESIG_ENG <- as.factor(VIIRS_PA_NoDups$DESIG_ENG)
## Fix names (accents): transliterate latin1 accented characters to ASCII
s <- as.character(VIIRS_PA_NoDups$NAME) ##
Encoding(s) <- "latin1"
VIIRS_PA_NoDups$NAME <- iconv(s,from="latin1",to="ASCII//TRANSLIT") ## ok
VIIRS_PA_NoDups$NAME <- as.factor(VIIRS_PA_NoDups$NAME)
VIIRS_PA_NoDups$DESIG_ENG <- as.factor(VIIRS_PA_NoDups$DESIG_ENG)
#### VIIRS States and Municipalities ####
flStMun <- list.files(path = ld[i], pattern = "States_Municipalities", full.names = T)
flStMun <- grep(pattern = ".shp", flStMun, value = T)
VIIRS_StMun <- read_sf(flStMun)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_StMun <- VIIRS_StMun %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
  dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
  dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
  select(UniqueID, NM_ESTADO, nm_municip)
## filter duplicates out (created by GEE, but just in case)
## NOTE(review): VIIRS_StMun_NoDups is computed here but never used below —
## the accent fixes and the final join use VIIRS_StMun instead. Looks like a
## bug (duplicates kept); confirm intended behavior.
VIIRS_StMun_NoDups <- VIIRS_StMun %>% dplyr::filter(!duplicated(UniqueID))
## Fix names (accents)
# state
u <- as.character(VIIRS_StMun$NM_ESTADO)
Encoding(u) <- "latin1"
VIIRS_StMun$NM_ESTADO <- iconv(u,from="latin1",to="ASCII//TRANSLIT")
VIIRS_StMun$NM_ESTADO <- as.factor(VIIRS_StMun$NM_ESTADO)
# municipality (NOTE: `t` shadows base::t() at the top level)
t <- as.character(VIIRS_StMun$nm_municip)
Encoding(t) <- "latin1"
VIIRS_StMun$nm_municip <- iconv(t,from="latin1",to="ASCII//TRANSLIT")
VIIRS_StMun$nm_municip <- as.factor(VIIRS_StMun$nm_municip)
#### VIIRS immediate regions ####
flImmedReg <- list.files(path = ld[i], pattern = "Regions", full.names = T)
flImmedReg <- grep(pattern = ".shp", flImmedReg, value = T)
VIIRS_ImmedReg <- read_sf(flImmedReg)
## create var LATLONG and unique ID, and select some vars out (already covered in the LCC dataset)
VIIRS_ImmedReg <- VIIRS_ImmedReg %>% dplyr::mutate(LATLONG = paste0(LATITUDE,'_', LONGITUDE)) %>%
  dplyr::mutate(DATE = paste0(year,'-',monthNo,'-',day)) %>%
  dplyr::mutate(UniqueID = paste0(LATLONG, '_', DATE, '_', ACQ_TIME)) %>%
  select(UniqueID, nome_rgint, nome_rgi)
## Fix names (accents)
## NOTE(review): unlike the PA/StMun sections, no Encoding()<- "latin1" is set
## before iconv() here — TODO confirm the region names convert correctly.
# intermediate regions (mesoregions)
v <- as.character(VIIRS_ImmedReg$nome_rgint)
VIIRS_ImmedReg$nome_rgint <- iconv(v,from="latin1",to="ASCII//TRANSLIT")
VIIRS_ImmedReg$nome_rgint <- as.factor(VIIRS_ImmedReg$nome_rgint) #
unique(VIIRS_ImmedReg$nome_rgint) # 29 mesoregions
## immediate regions
w <- as.character(VIIRS_ImmedReg$nome_rgi)
VIIRS_ImmedReg$nome_rgi <- iconv(w,from="latin1",to="ASCII//TRANSLIT")
VIIRS_ImmedReg$nome_rgi <- as.factor(VIIRS_ImmedReg$nome_rgi) #
unique(VIIRS_ImmedReg$nome_rgi) # 83 immediate regions
#### save as sf ####
## Only the LCC tibble keeps its geometry column; it anchors st_as_sf() after
## the joins below.
VIIRS_LCC_NoDups_MBcode_tb <- as_tibble(VIIRS_LCC_NoDups_MBcode)
VIIRS_LT_NoDups_Atlascode_tb <- as_tibble(VIIRS_LT_NoDups_Atlascode) %>% select(-geometry)
VIIRS_PA_NoDups_tb <- as_tibble(VIIRS_PA_NoDups) %>% select(-geometry)
VIIRS_StMun_tb <- as_tibble(VIIRS_StMun) %>% select(-geometry)
VIIRS_ImmedReg_tb <- as_tibble(VIIRS_ImmedReg) %>% select(-geometry)
## remove: VIIRS_Chirps_tb <- as_tibble(VIIRS_Chirps) %>% select(-geometry)
## join all layers on the shared UniqueID ####
VIIRS_LCC_LT_PA_StMun_ImmedReg <- VIIRS_LCC_NoDups_MBcode_tb %>%
  left_join(VIIRS_LT_NoDups_Atlascode_tb, by='UniqueID') %>%
  left_join(VIIRS_PA_NoDups_tb, by='UniqueID') %>%
  left_join(VIIRS_StMun_tb, by='UniqueID') %>%
  left_join(VIIRS_ImmedReg_tb, by='UniqueID')
## SF
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf <- st_as_sf(VIIRS_LCC_LT_PA_StMun_ImmedReg)
## arranging the whole dataset
## Create Months names (from the zero-padded month number) ####
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month <- NA
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("01")] <- 'Jan'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("02")] <- 'Feb'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("03")] <- 'Mar'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("04")] <- 'Apr'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("05")] <- 'May'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("06")] <- 'Jun'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("07")] <- 'Jul'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("08")] <- 'Aug'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("09")] <- 'Sep'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("10")] <- 'Oct'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("11")] <- 'Nov'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$monthNo%in%c("12")] <- 'Dec'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month <- as.factor(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$Month)
## Collapse WDPA designations into four broad protection categories
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ <- NA
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
  VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Ecological Station','Biological Reserve','Park','Wildlife Refuge',
                                                   'World Heritage Site (natural or mixed)')] <- 'Strictly Protected'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
  VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Environmental Protection Area','Area of Relevant Ecological Interest',
                                                   'Forest','Extractive Reserve','Sustainable Development Reserve',
                                                   'Ramsar Site, Wetland of International Importance')] <- 'Sustainable Use'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[
  VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('Indigenous Area','Indigenous Reserve')] <- 'Indigenous Territory'
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ[VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$DESIG_ENG%in%c('OutsidePA')] <- 'Not Protected'
#
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ <- as.factor(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf$PA_Categ)
## save one shapefile per year
VIIRS_LCC_LT_PA_StMun_ImmedReg_sf <- VIIRS_LCC_LT_PA_StMun_ImmedReg_sf %>% select(-ACQ_DATE)
write_sf(VIIRS_LCC_LT_PA_StMun_ImmedReg_sf,
         dsn = paste0("~/Oxford/FireAmazon2019_GCRF/DataFromR/Phase6_Climatic/VIIRS_LCC_LT_PA_StMun_ImmedReg_AllYearsDats/VIIRS_LCC_LT_PA_StMun_ImmedReg_",y,"_sf.shp"),
         layer = paste0("VIIRS_LCC_LT_PA_StMun_ImmedReg_",y,"_sf"), driver = "ESRI Shapefile")
cat("\n")
}
|
#' Data from Brennan
#'
#' A dataset containing 4 variables: Person, Item, Occasion, & Score
#'
#' @format A data frame with 80 rows and 4 variables:
#' \describe{
#' \item{Person}{Person ID}
#' \item{Item}{Item ID}
#' \item{Occasion}{Occasion ID}
#' \item{Score}{Given score}
#' }
"Brennan.3.1"
|
/R/data.R
|
permissive
|
alanhuebner10/Gboot
|
R
| false
| false
| 305
|
r
|
#' Data from Brennan
#'
#' A dataset containing 4 variables: Person, Item, Occasion, & Score
#'
#' @format A data frame with 80 rows and 4 variables:
#' \describe{
#' \item{Person}{Person ID}
#' \item{Item}{Item ID}
#' \item{Occasion}{Occasion ID}
#' \item{Score}{Given score}
#' }
"Brennan.3.1"
|
# Single server -----------------------------------------------------------
# Discrete-event simulation of a single-server queue (Ross, "Simulation",
# ch. 7): Poisson arrivals at rate `lambda` over [0, T]; service times are
# zero-truncated normal draws with mean `mu` and sd `sigma`.
# State: t = clock, n = customers in system, na/nd = arrival/departure
# counts, ta/td = time of the next arrival/departure event.
# NOTE: `T` here is the closing time (hours) and shadows base R's TRUE.
T = 8
lambda = 12
mu = 1/5
sigma = 1/80
t = 0
na = 0
nd = 0
ss = 0   # NOTE(review): unused; kept for compatibility with the book's listing
ta = rexp(1, rate = lambda)
td = Inf
n = 0
# Preallocate event logs (~2x the expected T*lambda arrivals; grown if needed).
arrivals = numeric(2 * ceiling (T * lambda))
departures = numeric(2 * ceiling (T * lambda))
# Draw one service time (normal, truncated at zero).
next_dep = function (){
  max(0, rnorm(1, mean = mu, sd = sigma))
}
while (min(ta, td) < T) {
  if (ta <= td) {
    # Next event: an arrival.
    t = ta
    na = na + 1
    n = n + 1
    ta = ta + rexp(1, rate = lambda)
    if (n == 1) {
      # Server was idle: this customer enters service immediately.
      td = t + next_dep()
    }
    if (na > length(arrivals)) {
      arrivals = c(arrivals, numeric(100))
      departures = c(departures, numeric(100))
    }
    arrivals[na] = t
  } else {
    # Next event: a departure.
    t = td
    n = n - 1
    nd = nd + 1
    # BUG FIX: the original used `ifelse(nd == 0, Inf, td + next_dep())`.
    # `nd` was just incremented, so the test was never TRUE: a phantom
    # departure stayed scheduled and `n` could go negative once the system
    # emptied. The next departure exists only while customers remain
    # (n > 0); using `if` also avoids the spurious RNG draw that ifelse()
    # makes when the queue is empty (ifelse evaluates both branches).
    td = if (n == 0) Inf else t + next_dep()
    departures[nd] = t
  }
}
# Past the arrival horizon T: serve everyone still in the system.
while (n > 0) {
  t = td
  n = n - 1
  nd = nd + 1
  td = td + next_dep()
  departures[nd] = t
}
tp = max (t- T, 0)   # overtime worked past closing time T
arrivals = arrivals[1:na]
departures = departures[1:nd]
# Servers in series -------------------------------------------------------
# Tandem queue: customers arrive at rate `lambda`, are served at server 1
# (mean service time mu1), then queue for server 2 (mean mu2) before leaving.
# The simulation stops once `maxn` customers have arrived.
maxn = 10000
lambda = 12
mu1 = 1/6
mu2 = 1/3
sigma = 1/80
t = 0
n1 = 0   # customers at server 1
n2 = 0   # customers at server 2
na = 0   # arrivals so far
nd = 0   # departures from the system so far
ta = rexp(1, lambda)
t1 = Inf  # next service completion at server 1
t2 = Inf  # next service completion at server 2
arrivals1 = numeric(maxn)
arrivals2 = numeric(maxn)
departures = numeric(maxn)
# Draw one service time with the given mean (normal, truncated at zero).
next_dep = function (mu){
  max(0, rnorm(1, mean = mu, sd = sigma))
}
while (na < maxn) {
  if (ta <= t1 & ta <= t2) {
    # Arrival to server 1.
    t = ta
    na = na + 1
    n1 = n1 + 1
    ta = ta + rexp(1, lambda)
    if (n1 == 1) {
      t1 = t + next_dep(mu1)
    }
    arrivals1[na] = t
  } else if (t1 < ta & t1 <= t2) {
    # Completion at server 1; the customer moves to server 2.
    t = t1
    n1 = n1 - 1
    n2 = n2 + 1
    # NOTE(review): ifelse() evaluates both branches, so a service time is
    # drawn even when server 1 empties; kept to preserve the RNG sequence.
    t1 = ifelse (n1 == 0, Inf, t + next_dep(mu1))
    if (n2 == 1) {
      t2 = t + next_dep(mu2)
    }
    arrivals2[na - n1] = t   # (na - n1) = count of customers that have reached server 2
  } else {
    # Completion at server 2; the customer leaves the system.
    t = t2
    nd = nd + 1
    n2 = n2 - 1
    t2 = ifelse (n2 == 0, Inf, t + next_dep(mu2))
    departures[nd] = t
  }
}
arrivals2 = arrivals2 [1:(na - n1)]
# BUG FIX: the original assigned the trimmed vector to a misspelled
# `delartures`, leaving `departures` padded with trailing zeros.
departures = departures[1:nd]
(na-n1)/na
nd/na
# Inventory Model ---------------------------------------------------------
# (s, S) inventory simulation: customers arrive at rate `lambda` and demand
# 1-5 units each; when stock x drops below s (and no order is pending) an
# order bringing stock back up to S is placed and arrives after lead time L.
# R = sales revenue, C = ordering cost, H = holding cost; the final
# expression is the long-run profit rate.
lambda = 20
users_demand = function () {sample(1:5, 1)}
L = 3      # order lead time
h = 100    # holding cost per unit per unit time
c = function (y) {2e3 * y + 2e4}   # order cost (NOTE: shadows base::c here)
r = 3e3    # revenue per unit sold
maxT = 10^4
s = 180
S = 700
R = 0
C = 0
H = 0
t = 0
x = 1000   # stock on hand
y = 0      # units on order (0 = no order pending)
t0 = rexp(1, lambda)  # next customer arrival time
t1 = Inf              # pending order delivery time
while (min(t0, t1) < maxT) {
  if (t0 < t1) {
    # Customer arrives: accrue holding cost since the last event, then sell
    # as much of the demand as stock allows.
    H = H + (t0 - t) * x * h
    t = t0
    w = min (x , users_demand())
    R = R + w * r
    x = x - w
    if (x < s & y == 0) {
      y = S - x
      t1 = t + L
    }
    t0 = t0 + rexp(1, lambda)
  } else {
    # Order arrives.
    # BUG FIX: the original accrued holding cost over (t1 - t0), which is
    # NEGATIVE in this branch (t1 <= t0); the elapsed time since the last
    # event is (t1 - t).
    H = H + (t1 - t) * x * h
    t = t1
    C = C + c(y)
    x = x + y
    y = 0
    t1 = Inf
  }
}
(R - C - H) / maxT
# An Insurance Risk Model -------------------------------------------------
# Ross's insurance risk model: new policyholders arrive as a Poisson process
# (rate nu); each existing policyholder departs at rate mu and generates
# claims at rate lambda; premiums accrue at rate c per policyholder per day.
# Monte Carlo estimate of P(capital stays non-negative through maxT days).
nsim = 1e3      # number of Monte Carlo replications
nu = 10         # new-policyholder arrival rate (per day)
lambda = 2/365  # per-policyholder claim rate
mu = 1/365      # per-policyholder departure rate
c = 2e3         # premium income per policyholder per day (shadows base::c)
n0 = 100        # initial number of policyholders
a0 = 1e6        # initial capital
maxT = 365      # horizon (days)
# Claim-size distribution: normal(3e5, 4e4) truncated at zero.
f = function() {max(rnorm(1, mean = 3e5, sd= 4e4), 0)}
npositive = 0
i = 1           # NOTE(review): dead assignment; the for loop resets i
for (i in 1 : nsim) {
t = 0
n = n0
a = a0
I = 1           # indicator: capital never went negative in this replication
while (t < maxT) {
# Time of next event: superposition of arrival/departure/claim processes.
te = t + rexp (1, nu + n*mu + n * lambda)
# Premiums accrue continuously between events.
a = a + n * c * (te - t)
t = te
# Classify the event by thinning the combined rate.
r = runif(1)
if (r <= nu / (nu + n * mu + n * lambda)) {
n = n + 1
} else if (r <= (nu + n * mu) / (nu + n * mu + n * lambda)) {
n = n - 1
} else {
# A claim occurs; ruin if it exceeds the current capital.
y = f()
if (y > a) {
I = 0
break
} else {
a = a - y
}
}
}
if (i %% 100 == 0) print(i)  # progress indicator
npositive = npositive + I
}
npositive / nsim
|
/Discrete Events Approach.R
|
no_license
|
mirsadeghi13/Simulation-98-2
|
R
| false
| false
| 3,211
|
r
|
# Single server -----------------------------------------------------------
T = 8
lambda = 12
mu = 1/5
sigma = 1/80
t = 0
na = 0
nd = 0
ss = 0
ta = rexp(1, rate = lambda)
td = Inf
n = 0
arrivals = numeric(2 * ceiling (T * lambda))
departures = numeric(2 * ceiling (T * lambda))
next_dep = function (){
max(0, rnorm(1, mean = mu, sd = sigma))
}
while (min(ta, td) < T) {
if (ta <= td) {
t = ta
na = na + 1
n = n + 1
ta = ta + rexp(1, rate = lambda)
if (n == 1) {
td = t + next_dep()
}
if (na > length(arrivals)) {
arrivals = c(arrivals, numeric(100))
departures = c(departures, numeric(100))
}
arrivals[na] = t
} else {
t = td
n = n - 1
nd = nd + 1
td = ifelse (nd == 0, Inf, td + next_dep())
departures[nd] = t
}
}
while (n > 0) {
t = td
n = n - 1
nd = nd + 1
td = td + next_dep()
departures[nd] = t
}
tp = max (t- T, 0)
arrivals = arrivals[1:na]
departures = departures[1:nd]
# Servers in series -------------------------------------------------------
maxn = 10000
lambda = 12
mu1 = 1/6
mu2 = 1/3
sigma = 1/80
t = 0
n1 = 0
n2 = 0
na = 0
nd = 0
ta = rexp(1, lambda)
t1 = Inf
t2 = Inf
arrivals1 = numeric(maxn)
arrivals2 = numeric(maxn)
departures = numeric(maxn)
next_dep = function (mu){
max(0, rnorm(1, mean = mu, sd = sigma))
}
while (na < maxn) {
if (ta <= t1 & ta <= t2) {
t = ta
na = na + 1
n1 = n1 + 1
ta = ta + rexp(1, lambda)
if (n1 == 1) {
t1 = t + next_dep(mu1)
}
arrivals1[na] = t
} else if (t1 < ta & t1 <= t2) {
t = t1
n1 = n1 - 1
n2 = n2 + 1
t1 = ifelse (n1 == 0, Inf, t + next_dep(mu1))
if (n2 == 1) {
t2 = t + next_dep(mu2)
}
arrivals2[na - n1] = t
} else {
t = t2
nd = nd + 1
n2 = n2 - 1
t2 = ifelse (n2 == 0, Inf, t + next_dep(mu2))
departures[nd] = t
}
}
arrivals2 = arrivals2 [1:(na - n1)]
delartures = departures[1:nd]
(na-n1)/na
nd/na
# Inventory Model ---------------------------------------------------------
lambda = 20
users_demand = function () {sample(1:5, 1)}
L = 3
h = 100
c = function (y) {2e3 * y + 2e4}
r = 3e3
maxT = 10^4
s = 180
S = 700
R = 0
C = 0
H = 0
t = 0
x = 1000
y = 0
t0 = rexp(1, lambda)
t1 = Inf
while (min(t0, t1) < maxT) {
if (t0 < t1) {
H = H + (t0 - t) * x * h
t = t0
w = min (x , users_demand())
R = R + w * r
x = x - w
if (x < s & y == 0) {
y = S - x
t1 = t + L
}
t0 = t0 + rexp(1, lambda)
} else {
H = H + (t1 - t0) * x * h
t = t1
C = C + c(y)
x = x + y
y = 0
t1 = Inf
}
}
(R - C - H) / maxT
# An Insurance Risk Model -------------------------------------------------
nsim = 1e3
nu = 10
lambda = 2/365
mu = 1/365
c = 2e3
n0 = 100
a0 = 1e6
maxT = 365
f = function() {max(rnorm(1, mean = 3e5, sd= 4e4), 0)}
npositive = 0
i = 1
for (i in 1 : nsim) {
t = 0
n = n0
a = a0
I = 1
while (t < maxT) {
te = t + rexp (1, nu + n*mu + n * lambda)
a = a + n * c * (te - t)
t = te
r = runif(1)
if (r <= nu / (nu + n * mu + n * lambda)) {
n = n + 1
} else if (r <= (nu + n * mu) / (nu + n * mu + n * lambda)) {
n = n - 1
} else {
y = f()
if (y > a) {
I = 0
break
} else {
a = a - y
}
}
}
if (i %% 100 == 0) print(i)
npositive = npositive + I
}
npositive / nsim
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{h2o.scale}
\alias{h2o.scale}
\alias{scale.H2OFrame}
\title{Scaling and Centering of an H2OFrame}
\usage{
h2o.scale(x, center = TRUE, scale = TRUE)
\method{scale}{H2OFrame}(x, center = TRUE, scale = TRUE)
}
\arguments{
\item{x}{An H2OFrame object.}
\item{center}{either a \code{logical} value or numeric vector of length equal to the number of columns of x.}
\item{scale}{either a \code{logical} value or numeric vector of length equal to the number of columns of x.}
}
\description{
Centers and/or scales the columns of an H2O dataset.
}
\examples{
\donttest{
library(h2o)
h2o.init()
irisPath <- system.file("extdata", "iris_wheader.csv", package="h2o")
iris.hex <- h2o.uploadFile(path = irisPath, destination_frame = "iris.hex")
summary(iris.hex)
# Scale and center all the numeric columns in iris data set
scale(iris.hex[, 1:4])
}
}
|
/h2o_3.10.4.4/h2o/man/h2o.scale.Rd
|
no_license
|
JoeyChiese/gitKraken_test
|
R
| false
| false
| 899
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{h2o.scale}
\alias{h2o.scale}
\alias{scale.H2OFrame}
\title{Scaling and Centering of an H2OFrame}
\usage{
h2o.scale(x, center = TRUE, scale = TRUE)
\method{scale}{H2OFrame}(x, center = TRUE, scale = TRUE)
}
\arguments{
\item{x}{An H2OFrame object.}
\item{center}{either a \code{logical} value or numeric vector of length equal to the number of columns of x.}
\item{scale}{either a \code{logical} value or numeric vector of length equal to the number of columns of x.}
}
\description{
Centers and/or scales the columns of an H2O dataset.
}
\examples{
\donttest{
library(h2o)
h2o.init()
irisPath <- system.file("extdata", "iris_wheader.csv", package="h2o")
iris.hex <- h2o.uploadFile(path = irisPath, destination_frame = "iris.hex")
summary(iris.hex)
# Scale and center all the numeric columns in iris data set
scale(iris.hex[, 1:4])
}
}
|
# mimic relevant part of .Rmd state ---------------------------------------
cat("\n")
# code chunk --------------------------------------------------------------
# Pause execution for the given number of seconds (defaults to two).
wait <- function(seconds = 2) {
  Sys.sleep(seconds)
}

# Print the two-line "A" message, pausing between the lines.
send_cat_threat <- function() {
  cat("Dead girls walking.\n")
  wait()
  cat("--A.\n")
}
send_cat_threat()
|
/_site/posts/2021-04-18_pretty-little-clis/source/cat-example.R
|
no_license
|
jake-wittman/distill-blog
|
R
| false
| false
| 325
|
r
|
# mimic relevant part of .Rmd state ---------------------------------------
cat("\n")
# code chunk --------------------------------------------------------------
wait <- function(seconds = 2) {Sys.sleep(seconds)}
send_cat_threat <- function() {
cat("Dead girls walking.\n"); wait()
cat("--A.\n")
}
send_cat_threat()
|
# Draw 100 standard-normal samples and return their mean.
# NOTE(review): the argument `x` is ignored — it is immediately overwritten
# by the random draw; it is kept only for interface compatibility.
myfunction <- function(x) {
  draws <- rnorm(100)
  mean(draws)
}
|
/R Programming/myfunction.R
|
no_license
|
studoma/datasciencecoursera
|
R
| false
| false
| 56
|
r
|
myfunction <- function(x){
x <- rnorm(100)
mean(x)
}
|
# calculate BLUPs on the rlog_clean data,
# may need further filtering on n% zero values, but can subset them from BLUP results
########################
align<-"hisat"
transf<-"rlog"
#########################
# reads_cutoff<-2 # 1 or 2 for 1M (2M) cutoff: reads per library
## Read rlog-normalized, QC-cleaned counts (genes in rows, samples in columns).
## NOTE(review): the two server-path reads below are immediately overwritten
## by the "local" reads a few lines down — only one pair should be active
## depending on where the script runs; confirm which machine this targets.
counts_1<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_cleanedQC_counts_v4_rep1_rlog_clean.txt",sep=""),row.names=1)
counts_2<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_cleanedQC_counts_v4_rep2_rlog_clean.txt",sep=""),row.names=1)
## not correct to use _1M. Need to check !!!!
# counts_1<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_QC_cmp1_cor74_rep1_1M.txt",sep=""),header=T,sep="\t",row.names=1)
# counts_2<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_QC_cmp1_cor74_rep2_1M.txt",sep=""),header=T,sep="\t",row.names=1)
# local
counts_1<-read.table(paste("/Users/Meng/Desktop/RNAseq_temp/Hisat_cleanedQC_counts_v4_rep1_rlog_clean.txt",sep=""),row.names=1)
counts_2<-read.table(paste("/Users/Meng/Desktop/RNAseq_temp/Hisat_cleanedQC_counts_v4_rep2_rlog_clean.txt",sep=""),row.names=1)
## Restrict both reps to the genes present in both.
cm_gene<-intersect(rownames(counts_1),rownames(counts_2))
nm_gene<-length(cm_gene) # 21435 common genes, may need further filtering
#cm_line<-intersect(colnames(counts_1),colnames(counts_2))
#taxa<-read.table("/Users/Meng/Desktop/LabServer/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/taxa310_pheno_geno_rna.txt",header=F,stringsAsFactors=F)
taxa<-read.table("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/taxa310_pheno_geno_rna.txt",header=F,stringsAsFactors=F)
taxa<-taxa[,1]
## Split each rep into experimental lines (in `taxa`) and MO17 checks,
## then recombine so the checks sit in the last columns.
counts_1_e<-counts_1[cm_gene,which(colnames(counts_1) %in% taxa)]
counts_1_c<-counts_1[cm_gene,grep("MO17",colnames(counts_1),fixed=T)]
cm_counts_1<-cbind(counts_1_e,counts_1_c)
counts_2_e<-counts_2[cm_gene,which(colnames(counts_2) %in% taxa)]
counts_2_c<-counts_2[cm_gene,grep("MO17",colnames(counts_2),fixed=T)]
cm_counts_2<-cbind(counts_2_e,counts_2_c)
#### for uploading RNA-seq data ####
setwd ("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/sequence_upload")
write.table(c(colnames(counts_1_e),colnames(counts_1_c)),"RNA-seq_samples_to_upload_rep1.txt",col.names=F,row.names=F,sep="\t",quote=F)
write.table(c(colnames(counts_2_e),colnames(counts_2_c)),"RNA-seq_samples_to_upload_rep2.txt",col.names=F,row.names=F,sep="\t",quote=F)
length(c(colnames(counts_1_e),colnames(counts_1_c)))
#####################################
## Sanity checks: gene order must match between reps; list taxa with RNA data.
which(rownames(cm_counts_1)!=rownames(cm_counts_2))
#length(union(colnames(cm_counts_1),colnames(cm_counts_2)))
taxa[which(taxa %in% union(colnames(cm_counts_1),colnames(cm_counts_2)))]
#### run rep 1 and rep 2 sequentially, and stack the two results ##################
## For each replication: transpose the counts (samples in rows), attach the
## field-design covariates (check status, column, block) and the RNA-seq
## plate, producing cand_counts_1 / cand_counts_2 for the combined BLUP model.
## NOTE: the loop variable `rep` shadows base::rep inside this loop.
for (rep in 1:2){
if (rep==1){
counts=cm_counts_1
} else {
counts=cm_counts_2
}
############################
counts.log<-counts
cand_counts <- as.data.frame(t(counts.log))
gene_name<-colnames(cand_counts)
#######################################
## experimental design ################
#design<-read.table(paste("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
#design<-read.table(paste("/Users/Meng/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
design<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
design$MLC_mf<-as.character(design$MLC_mf)
design<-design[which(design$book.replication==rep),]
## Sample names that start with a digit get an "X" prefix so they match the
## make.names()-style column names of the counts matrix.
design$MLC_mf[grep("^[0-9]",design$MLC_mf)]<-paste("X",design$MLC_mf[grep("^[0-9]",design$MLC_mf)],sep="")
## CHECK: 99 = experimental line; otherwise the rep number of the MO17 check.
design$CHECK<-99
design$CHECK[which(design$MLC_STANDARD=="MO17")]<-rep
design$IS_EXPERIMENTAL<-1
design$IS_EXPERIMENTAL[which(design$MLC_STANDARD=="MO17")]<-0
design$COL1<-design$book.cols
## SD18 only: fold the serpentine column layout (columns > 9 mirror back)
design$COL1[which(design$book.cols>9)]<-19-design$book.cols[which(design$book.cols>9)]
## NOTE(review): column selection below is by hard-coded position — fragile
## if the design file's layout changes; confirm against the file header.
design<-design[,c(6,9:12,3,5)]
colnames(design)<-c("MLC_STANDARD", "MLC_mf", "CHECK", "IS_EXPERIMENTAL", "COL1","BLOCK","rep1")
# add plate information
#plate_info<-read.table(paste("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
#plate_info<-read.table(paste("/Users/Meng/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
plate_info<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
### for checking
# taxa_t<-rownames(cand_counts)
# taxa_pl<-as.character(plate_info[,1])
# taxa_d<-as.character(design$MLC_mf)
# taxa_t_pl<-intersect(taxa_t,taxa_pl)
# taxa_t_d<-intersect(taxa_t,taxa_d)
# taxa_pl_d<-intersect(taxa_pl,taxa_d)
# taxa_t.1<-read.table("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Individuals_after_filter_rep_1.txt",header=T,sep="\t")
# taxa_t.2<-read.table("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Individuals_after_filter_rep_2.txt",header=T,sep="\t")
# taxa_t.both<-union(taxa_t.1[,1],taxa_t.2[,1])
# taxa_final<-intersect(taxa_t.both,taxa_d)
# taxa_pheno<-as.character(pheno.all$MLC_STANDARD[!is.na(pheno.all$ft_SD18_both_untr)])
# taxa_final.2<-intersect(taxa_t.both,taxa_pheno)
## end of checking
colnames(plate_info)[1]<-"MLC_mf"
design<-merge(design,plate_info,by="MLC_mf",all=F)
## Attach the sample IDs as the first column so counts can be merged with
## the design on MLC_mf (inner join: keeps samples present in both).
cand_counts<-cbind(rownames(cand_counts),cand_counts)
colnames(cand_counts)[1]<-"MLC_mf"
rownames(cand_counts)<-NULL
#cand_counts_test<-merge(cand_counts[,1:5],design,by="MLC_mf",all=T)
cand_counts<-merge(cand_counts,design,by="MLC_mf",all=F)
#colnames(cand_counts)[ncol(cand_counts)-1]<-"CEadj"
cand_counts$rep<-rep
if(rep==1){
cand_counts_1<-cand_counts
}else{
cand_counts_2<-cand_counts
}
}
## Sanity check: the two reps must have identical column layouts before stacking.
which(colnames(cand_counts_1)!=colnames(cand_counts_2))
############################################################
# Plot gene-based correlation between rep1 & rep2, before PEER
##############################################################
# cm_taxa<-intersect(as.character(cand_counts_1[,1]),as.character(cand_counts_2[,1]))
# cm_gene_taxa_1<-cand_counts_1[which(cand_counts_1[,1] %in% cm_taxa),]
# cm_gene_taxa_2<-cand_counts_2[which(cand_counts_2[,1] %in% cm_taxa),]
# which(as.character(cm_gene_taxa_1[,1])!=as.character(cm_gene_taxa_2[,1]))
# Correlations<-vector()
# for (i in 2:(1+length(cm_gene))){
#   correlation<-round(cor(cm_gene_taxa_1[,i],cm_gene_taxa_2[,i]),3)
#   Correlations<-c(Correlations,correlation)
# }
# setwd("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS_v4/H2_est")
# pdf("gene-based_cor_rep1vsrep2_bfPEER.pdf",height=6,width=8)
# hist(Correlations,xlab="Correlation",main="gene-based correlation between rep1 & rep2, before PEER")
# dev.off()
###########################################################
############# after running rep 1 & 2, row bind cand_counts1 & 2
cand_counts_both<-rbind.data.frame(cand_counts_1,cand_counts_2) # 313 unique lines, 245 lines in both environments; 282 rep1 and 302 rep 2
length(unique(as.character(cand_counts_both$MLC_STANDARD))) # should be 311 including MO17, 310 experimental lines
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
write.table(cand_counts_both,"Hisat_cleanedQC_counts_v4_repBoth_rlog_clean_wDesign.txt",row.names=F,sep="\t",quote=F)
######## BLUPs calculation (only for combined exp) #############
#### Path of the license:
lic_path = "/workdir/ml2498/ASReml/License/"
# Load asreml (commercial mixed-model package; needs the license file below):
setwd(lic_path)
library(asreml)
asreml.lic(license = "asreml.lic", install = TRUE)
#################################
cand_counts=cand_counts_both
## Expression columns (2 .. nm_gene+1) must be numeric for asreml.
for (i in 2:(nm_gene+1)){ # for subset H2
#for (i in 2:(length(gene_name)+1)){ # for whole set BLUPs
cand_counts[,i]<-as.numeric(as.character(cand_counts[,i]))
}
## Design covariates enter the mixed model as factors.
cand_counts$COL1<-as.factor(cand_counts$COL1)
cand_counts$BLOCK<-as.factor(cand_counts$BLOCK)
cand_counts$CHECK<-as.factor(cand_counts$CHECK)
cand_counts$Plate<-as.factor(cand_counts$Plate)
cand_counts$rep<-as.factor(cand_counts$rep)
cand_counts$MLC_STANDARD<-as.factor(as.character(cand_counts$MLC_STANDARD))
cand_counts$IS_EXPERIMENTAL<-as.numeric(cand_counts$IS_EXPERIMENTAL)
## Per-gene variance-component accumulators (filled by the model loop below).
## NOTE: `rep<-vector()` clobbers both the loop variable above and base::rep.
Res<-vector()
Gvar<-vector()
GXEvar<-vector()
plate<-vector()
block<-vector()
colm<-vector()
rep<-vector()
######################################
# If only want to have BLUPs
######################################
# only run this line when BLUPs are needed
count_Blup<-vector()
#Residual<-vector()
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
#setwd("/home/ml2498/Desktop/GeneExpression/comb_GeneExp")
#pdf("Hist_BLUPs_res_GeneExpression.pdf",height = 4,width = 5)
#for (i in 101:200){
# Fit one mixed model per gene: fixed check effect; random genotype (nested
# in IS_EXPERIMENTAL so the MO17 check is excluded), rep, and block/column/
# plate nested in rep, plus genotype-by-rep (GxE).
for (i in 2:(nm_gene+1)){
GeneID<-colnames(cand_counts)[i]
# ## if run it at local computer
# tryCatch({
# fit.asr <- eval(parse(text=paste("asreml(fixed = ",GeneID," ~ CHECK, random = ~ MLC_STANDARD:IS_EXPERIMENTAL+rep+rep/BLOCK+rep/COL1+rep/Plate+MLC_STANDARD:IS_EXPERIMENTAL:rep,na.action=na.method(x='omit'),data = cand_counts)",sep="")))
# }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
# ##################################
### if run it on server
# NOTE(review): eval(parse(...)) builds the formula from the gene column
# name; this requires gene IDs to be syntactically valid R names --
# constructing the formula with as.formula() would be safer.
fit.asr <- eval(parse(text=paste("asreml(fixed = ",GeneID," ~ CHECK, random = ~ MLC_STANDARD:IS_EXPERIMENTAL+rep+rep/BLOCK+rep/COL1+rep/Plate+MLC_STANDARD:IS_EXPERIMENTAL:rep,na.method.X='omit',data = cand_counts)",sep="")))
#print(summary(fit.asr)$varcomp)
# Variance components are extracted by row position; assumes asreml orders
# them rep, Plate, COL1, BLOCK, genotype, GxE, residual (rows 1-7) --
# TODO confirm against summary(fit.asr)$varcomp for this model spec.
var_res<-summary(fit.asr)$varcomp[7,1]
Res<-c(Res,var_res)
var_geno<-summary(fit.asr)$varcomp[5,1]
Gvar<-c(Gvar,var_geno)
var_gxe<-summary(fit.asr)$varcomp[6,1]
GXEvar<-c(GXEvar,var_gxe)
var_r<-summary(fit.asr)$varcomp[1,1]
rep<-c(rep,var_r)
var_p<-summary(fit.asr)$varcomp[2,1]
plate<-c(plate,var_p)
var_b<-summary(fit.asr)$varcomp[4,1]
block<-c(block,var_b)
var_c<-summary(fit.asr)$varcomp[3,1]
colm<-c(colm,var_c)
blup<-fit.asr$coefficients$random
## laptop (asreml4)
#blup<-blup[-c(1:112,grep("MO17",names(blup))),]
## server (asreml3)
# Drop the first 112 random coefficients (non-genotype design terms) and
# the MO17 check -- TODO confirm the 112 offset matches this design.
blup<-blup[-c(1:112,grep("MO17",names(blup)))]
# Shift BLUPs back to the trait scale using the fixed intercept and the
# experimental-line check coefficient (fixed-effect positions 4 and 3 --
# presumably stable for this model; verify against coefficients$fixed).
intercept<-fit.asr$coefficients$fixed[4]
check99<-fit.asr$coefficients$fixed[3]
blup<-round(blup+intercept+check99,3)
# NOTE(review): count_Blup grows by cbind each iteration (O(n^2) copying);
# preallocating a matrix would be faster for ~20K genes.
count_Blup<-cbind(count_Blup,blup)
#residual<-fit.asr$residuals
#residual<-residual[-grep("MO17",cand_counts$MLC_mf)]
#residual<-round(residual,3)
#Residual<-cbind(Residual,residual)
}
# One column of BLUPs per gene, in the original gene order.
colnames(count_Blup)<-colnames(cand_counts)[2:(nm_gene+1)]
#colnames(Residual)<-colnames(Residual)[2:(nm_gene+1)]
#rownames(Residual)<-cand_counts$MLC_mf[-grep("MO17",cand_counts$MLC_mf)]
# Prepend the coefficient names (same for every gene, so the last loop
# iteration's `blup` names suffice), keep only genotype-BLUP rows, and
# strip asreml's name decorations down to the bare line identifier.
count_Blup1<-cbind(as.character(names(blup)),count_Blup)
rownames(count_Blup1)<-NULL
count_Blup1<-count_Blup1[grep("MLC_STANDARD_",count_Blup1[,1]),]
count_Blup1[,1]<-sub("MLC_STANDARD_","",count_Blup1[,1])
count_Blup1[,1]<-sub(":IS_EXPERIMENTAL","",count_Blup1[,1])
colnames(count_Blup1)[1]<-"MLC_STANDARD"
write.table(count_Blup1,"hisat_BLUPs_rlog_21Kgenes.txt",col.names=T,row.names=F,sep="\t",quote=F)
## store variance components
VC<-cbind(colnames(cand_counts)[2:(nm_gene+1)],rep, block,colm,plate,Gvar,GXEvar,Res)
colnames(VC)<-c("GeneID","var_rep","var_block","var_col","var_plate","var_geno","var_gxe","var_residual")
write.table(VC,"variance_components_proportional_trpt_abundance_rlog.txt",col.names=T,row.names=F,sep="\t",quote=F)
################################################################################
## Determine genes with too many zero's using "filter_out_genes_w_many_zeros.R"
## filter out those 1435 genes from BLUPs
################################################################################
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
# Drop the externally determined low-information genes from the BLUP table.
rm<-read.table("remove_genes_with_many0s_fromBLUPs.txt",header=T,sep="\t")
count_Blup1<-read.table("hisat_BLUPs_rlog_21Kgenes.txt",header=T,sep="\t")
count_Blup1<-count_Blup1[,-which(colnames(count_Blup1) %in% as.character(rm$geneID))]
#############################################################
###### PEER
#############################################################
reads_cutoff=2
count_Blup<-count_Blup1
# Remove any per-replicate rows; only the combined-line BLUPs go into PEER.
count_Blup<-count_Blup[-grep("rep",count_Blup$MLC_STANDARD),] # 312x17281, MO17 not included
rownames(count_Blup)<-count_Blup[,1]
write.table(count_Blup,"hisat_BLUPs_rlog_20019_genes.txt",row.names=F,sep="\t",quote=F)
count_Blup<-count_Blup[,-1]
# Number of candidate PEER factors: n_samples/4, capped at 100.
K_test<-min(round(dim(count_Blup)[1]/4),100)
align<-"hisat"
rep<-"BLUP"
transf<-"rlog"
library(peer)
set.seed(2010)
# build the model
model = PEER()
# run model
PEER_setNk(model,K_test)
# PEER_setNk(model,4)
PEER_setPhenoMean(model,as.matrix(count_Blup))
PEER_update(model) # Converged (var(residuals)) after 206 iterations, on server about 2.5-3 hours
# get precision
# Each factor's precision (inverse variance); used below to pick how many
# factors to keep.
precision = PEER_getAlpha(model)
precision = cbind(paste("PEER",1:K_test,sep=""),precision)
colnames(precision)<-c("factor","precision")
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/PEERfactorSel")
if (reads_cutoff==2){
write.table(precision,paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,".txt",sep=""),col.names=T,row.names=F,sep="\t",quote=F)
} else if (reads_cutoff==1){
write.table(precision,paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,"_1M.txt",sep=""),col.names=T,row.names=F,sep="\t",quote=F)
}
###### plot 1/precision ############################
# Scree-style plots of factor variance (1/precision) to choose K; the red
# line marks the selected cutoff of 20 factors.
pdf(paste(transf,"_PEER_precision_rep",rep,".pdf",sep=""))
precision<-read.table(paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,".txt",sep=""),header=T,sep="\t")
precision$var<-1/as.numeric(as.character(precision$precision))
precision$factor<-as.numeric(precision$factor)
precision<-precision[order(precision$factor),]
prec_sub<-precision[2:40,]
prec_sub1<-precision[-1,]
plot(precision$factor,precision$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),pch=1,xlab=NULL,ylab="1/Precision")
plot(prec_sub$factor,prec_sub$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),
ylim=c(0,0.05),
pch=19,xlab=NULL,ylab="1/Precision",cex=0.5,lty=1)
plot(prec_sub1$factor,prec_sub1$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),
ylim=c(0,0.04),
pch=19,xlab="Number of PEER factors",ylab="1/Precision",cex=0.5,lty=1)
abline(v=c(20),col="red")
dev.off()
#################################################
# Final PEER run with the chosen K=20 factors; write the residualized
# expression matrix (hidden-factor effects removed) for downstream TWAS.
K<-20
align<-"hisat"
rep<-"BLUP"
transf<-"rlog"
library(peer)
set.seed(1987)
# build the model
model = PEER()
# run model
PEER_setNk(model,K)
# PEER_setNk(model,4)
PEER_setPhenoMean(model,as.matrix(count_Blup))
PEER_update(model) # Converged (var(residuals)) after 206 iterations, on server about 2.5-3 hours
PEERres = as.data.frame(PEER_getResiduals(model))
colnames(PEERres)<-colnames(count_Blup)
rownames(PEERres)<-rownames(count_Blup)
write.table(PEERres,paste(transf,"_",align,"_PEER",K,"_PEERres_rep",rep,".txt",sep=""),col.names=T,row.names=T,sep="\t",quote=F)
#############################
#############################
|
/GWAS_TWAS_gc_code/2.BLUP_combGeneExp.R
|
no_license
|
GoreLab/Maize_leaf_cuticle
|
R
| false
| false
| 15,534
|
r
|
# caluclate BLUPs on the rlog_clean data,
# may need further filtering on n% zero values, but can subset them from BLUP results
########################
align<-"hisat"
transf<-"rlog"
#########################
# reads_cutoff<-2 # 1 or 2 for 1M (2M) cutoff: reads per library
counts_1<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_cleanedQC_counts_v4_rep1_rlog_clean.txt",sep=""),row.names=1)
counts_2<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_cleanedQC_counts_v4_rep2_rlog_clean.txt",sep=""),row.names=1)
## not correct to use _1M. Need to check !!!!
# counts_1<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_QC_cmp1_cor74_rep1_1M.txt",sep=""),header=T,sep="\t",row.names=1)
# counts_2<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Hisat_QC_cmp1_cor74_rep2_1M.txt",sep=""),header=T,sep="\t",row.names=1)
# local
counts_1<-read.table(paste("/Users/Meng/Desktop/RNAseq_temp/Hisat_cleanedQC_counts_v4_rep1_rlog_clean.txt",sep=""),row.names=1)
counts_2<-read.table(paste("/Users/Meng/Desktop/RNAseq_temp/Hisat_cleanedQC_counts_v4_rep2_rlog_clean.txt",sep=""),row.names=1)
cm_gene<-intersect(rownames(counts_1),rownames(counts_2))
nm_gene<-length(cm_gene) # 21435 common genes, may need further filtering
#cm_line<-intersect(colnames(counts_1),colnames(counts_2))
#taxa<-read.table("/Users/Meng/Desktop/LabServer/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/taxa310_pheno_geno_rna.txt",header=F,stringsAsFactors=F)
taxa<-read.table("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/taxa310_pheno_geno_rna.txt",header=F,stringsAsFactors=F)
taxa<-taxa[,1]
counts_1_e<-counts_1[cm_gene,which(colnames(counts_1) %in% taxa)]
counts_1_c<-counts_1[cm_gene,grep("MO17",colnames(counts_1),fixed=T)]
cm_counts_1<-cbind(counts_1_e,counts_1_c)
counts_2_e<-counts_2[cm_gene,which(colnames(counts_2) %in% taxa)]
counts_2_c<-counts_2[cm_gene,grep("MO17",colnames(counts_2),fixed=T)]
cm_counts_2<-cbind(counts_2_e,counts_2_c)
#### for uploading RNA-seq data ####
setwd ("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/sequence_upload")
write.table(c(colnames(counts_1_e),colnames(counts_1_c)),"RNA-seq_samples_to_upload_rep1.txt",col.names=F,row.names=F,sep="\t",quote=F)
write.table(c(colnames(counts_2_e),colnames(counts_2_c)),"RNA-seq_samples_to_upload_rep2.txt",col.names=F,row.names=F,sep="\t",quote=F)
length(c(colnames(counts_1_e),colnames(counts_1_c)))
#####################################
which(rownames(cm_counts_1)!=rownames(cm_counts_2))
#length(union(colnames(cm_counts_1),colnames(cm_counts_2)))
taxa[which(taxa %in% union(colnames(cm_counts_1),colnames(cm_counts_2)))]
#### run rep 1 and rep 2 sequencially, and stack the two results ##################
for (rep in 1:2){
if (rep==1){
counts=cm_counts_1
} else {
counts=cm_counts_2
}
############################
counts.log<-counts
cand_counts <- as.data.frame(t(counts.log))
gene_name<-colnames(cand_counts)
#######################################
## experimental design ################
#design<-read.table(paste("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
#design<-read.table(paste("/Users/Meng/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
design<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS/SD18_Design_Chk_Barcode_forBLUP.txt",sep=""),header=T,sep="\t")
design$MLC_mf<-as.character(design$MLC_mf)
design<-design[which(design$book.replication==rep),]
design$MLC_mf[grep("^[0-9]",design$MLC_mf)]<-paste("X",design$MLC_mf[grep("^[0-9]",design$MLC_mf)],sep="")
design$CHECK<-99
design$CHECK[which(design$MLC_STANDARD=="MO17")]<-rep
design$IS_EXPERIMENTAL<-1
design$IS_EXPERIMENTAL[which(design$MLC_STANDARD=="MO17")]<-0
design$COL1<-design$book.cols
## SD18 only
design$COL1[which(design$book.cols>9)]<-19-design$book.cols[which(design$book.cols>9)]
design<-design[,c(6,9:12,3,5)]
colnames(design)<-c("MLC_STANDARD", "MLC_mf", "CHECK", "IS_EXPERIMENTAL", "COL1","BLOCK","rep1")
# add plate information
#plate_info<-read.table(paste("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
#plate_info<-read.table(paste("/Users/Meng/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
plate_info<-read.table(paste("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/RNAseq_PlateInfo_v4_rep",rep,".txt",sep=""),header=T,sep="\t")
### for checking
# taxa_t<-rownames(cand_counts)
# taxa_pl<-as.character(plate_info[,1])
# taxa_d<-as.character(design$MLC_mf)
# taxa_t_pl<-intersect(taxa_t,taxa_pl)
# taxa_t_d<-intersect(taxa_t,taxa_d)
# taxa_pl_d<-intersect(taxa_pl,taxa_d)
# taxa_t.1<-read.table("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Individuals_after_filter_rep_1.txt",header=T,sep="\t")
# taxa_t.2<-read.table("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/RNAseq/GeneExpression/v4_counts/Individuals_after_filter_rep_2.txt",header=T,sep="\t")
# taxa_t.both<-union(taxa_t.1[,1],taxa_t.2[,1])
# taxa_final<-intersect(taxa_t.both,taxa_d)
# taxa_pheno<-as.character(pheno.all$MLC_STANDARD[!is.na(pheno.all$ft_SD18_both_untr)])
# taxa_final.2<-intersect(taxa_t.both,taxa_pheno)
## end of checking
colnames(plate_info)[1]<-"MLC_mf"
design<-merge(design,plate_info,by="MLC_mf",all=F)
cand_counts<-cbind(rownames(cand_counts),cand_counts)
colnames(cand_counts)[1]<-"MLC_mf"
rownames(cand_counts)<-NULL
#cand_counts_test<-merge(cand_counts[,1:5],design,by="MLC_mf",all=T)
cand_counts<-merge(cand_counts,design,by="MLC_mf",all=F)
#colnames(cand_counts)[ncol(cand_counts)-1]<-"CEadj"
cand_counts$rep<-rep
if(rep==1){
cand_counts_1<-cand_counts
}else{
cand_counts_2<-cand_counts
}
}
which(colnames(cand_counts_1)!=colnames(cand_counts_2))
############################################################
# Plot gene-based correlation between rep1 & rep2, before PEER
##############################################################
# cm_taxa<-intersect(as.character(cand_counts_1[,1]),as.character(cand_counts_2[,1]))
# cm_gene_taxa_1<-cand_counts_1[which(cand_counts_1[,1] %in% cm_taxa),]
# cm_gene_taxa_2<-cand_counts_2[which(cand_counts_2[,1] %in% cm_taxa),]
# which(as.character(cm_gene_taxa_1[,1])!=as.character(cm_gene_taxa_2[,1]))
# Correlations<-vector()
# for (i in 2:(1+length(cm_gene))){
# correlation<-round(cor(cm_gene_taxa_1[,i],cm_gene_taxa_2[,i]),3)
# Correlations<-c(Correlations,correlation)
# }
# setwd("/home/ml2498/Desktop/Labserver/MaizeLeafCuticle/TWAS_2018/TWAS_v4/H2_est")
# pdf("gene-based_cor_rep1vsrep2_bfPEER.pdf",height=6,width=8)
# hist(Correlations,xlab="Correlation",main="gene-based correlation between rep1 & rep2, before PEER")
# dev.off()
###########################################################
############# after running rep 1 & 2, row bind cand_counts1 & 2
cand_counts_both<-rbind.data.frame(cand_counts_1,cand_counts_2) # 313 unique lines, 245 lines in both environments; 282 rep1 and 302 rep 2
length(unique(as.character(cand_counts_both$MLC_STANDARD))) # should be 311 including MO17, 310 experimental lines
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
write.table(cand_counts_both,"Hisat_cleanedQC_counts_v4_repBoth_rlog_clean_wDesign.txt",row.names=F,sep="\t",quote=F)
######## BLUPs calculation (only for combined exp) #############
#### Path of the license:
lic_path = "/workdir/ml2498/ASReml/License/"
# Load asreml:
setwd(lic_path)
library(asreml)
asreml.lic(license = "asreml.lic", install = TRUE)
#################################
cand_counts=cand_counts_both
for (i in 2:(nm_gene+1)){ # for subset H2
#for (i in 2:(length(gene_name)+1)){ # for whole set BLUPs
cand_counts[,i]<-as.numeric(as.character(cand_counts[,i]))
}
cand_counts$COL1<-as.factor(cand_counts$COL1)
cand_counts$BLOCK<-as.factor(cand_counts$BLOCK)
cand_counts$CHECK<-as.factor(cand_counts$CHECK)
cand_counts$Plate<-as.factor(cand_counts$Plate)
cand_counts$rep<-as.factor(cand_counts$rep)
cand_counts$MLC_STANDARD<-as.factor(as.character(cand_counts$MLC_STANDARD))
cand_counts$IS_EXPERIMENTAL<-as.numeric(cand_counts$IS_EXPERIMENTAL)
Res<-vector()
Gvar<-vector()
GXEvar<-vector()
plate<-vector()
block<-vector()
colm<-vector()
rep<-vector()
######################################
# If only want to have BLUPs
######################################
# only run this line when BLUPs are needed
count_Blup<-vector()
#Residual<-vector()
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
#setwd("/home/ml2498/Desktop/GeneExpression/comb_GeneExp")
#pdf("Hist_BLUPs_res_GeneExpression.pdf",height = 4,width = 5)
#for (i in 101:200){
for (i in 2:(nm_gene+1)){
GeneID<-colnames(cand_counts)[i]
# ## if run it at local computer
# tryCatch({
# fit.asr <- eval(parse(text=paste("asreml(fixed = ",GeneID," ~ CHECK, random = ~ MLC_STANDARD:IS_EXPERIMENTAL+rep+rep/BLOCK+rep/COL1+rep/Plate+MLC_STANDARD:IS_EXPERIMENTAL:rep,na.action=na.method(x='omit'),data = cand_counts)",sep="")))
# }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
# ##################################
### if run it on server
fit.asr <- eval(parse(text=paste("asreml(fixed = ",GeneID," ~ CHECK, random = ~ MLC_STANDARD:IS_EXPERIMENTAL+rep+rep/BLOCK+rep/COL1+rep/Plate+MLC_STANDARD:IS_EXPERIMENTAL:rep,na.method.X='omit',data = cand_counts)",sep="")))
#print(summary(fit.asr)$varcomp)
var_res<-summary(fit.asr)$varcomp[7,1]
Res<-c(Res,var_res)
var_geno<-summary(fit.asr)$varcomp[5,1]
Gvar<-c(Gvar,var_geno)
var_gxe<-summary(fit.asr)$varcomp[6,1]
GXEvar<-c(GXEvar,var_gxe)
var_r<-summary(fit.asr)$varcomp[1,1]
rep<-c(rep,var_r)
var_p<-summary(fit.asr)$varcomp[2,1]
plate<-c(plate,var_p)
var_b<-summary(fit.asr)$varcomp[4,1]
block<-c(block,var_b)
var_c<-summary(fit.asr)$varcomp[3,1]
colm<-c(colm,var_c)
blup<-fit.asr$coefficients$random
## laptop (asreml4)
#blup<-blup[-c(1:112,grep("MO17",names(blup))),]
## server (asreml3)
blup<-blup[-c(1:112,grep("MO17",names(blup)))]
intercept<-fit.asr$coefficients$fixed[4]
check99<-fit.asr$coefficients$fixed[3]
blup<-round(blup+intercept+check99,3)
count_Blup<-cbind(count_Blup,blup)
#residual<-fit.asr$residuals
#residual<-residual[-grep("MO17",cand_counts$MLC_mf)]
#residual<-round(residual,3)
#Residual<-cbind(Residual,residual)
}
colnames(count_Blup)<-colnames(cand_counts)[2:(nm_gene+1)]
#colnames(Residual)<-colnames(Residual)[2:(nm_gene+1)]
#rownames(Residual)<-cand_counts$MLC_mf[-grep("MO17",cand_counts$MLC_mf)]
count_Blup1<-cbind(as.character(names(blup)),count_Blup)
rownames(count_Blup1)<-NULL
count_Blup1<-count_Blup1[grep("MLC_STANDARD_",count_Blup1[,1]),]
count_Blup1[,1]<-sub("MLC_STANDARD_","",count_Blup1[,1])
count_Blup1[,1]<-sub(":IS_EXPERIMENTAL","",count_Blup1[,1])
colnames(count_Blup1)[1]<-"MLC_STANDARD"
write.table(count_Blup1,"hisat_BLUPs_rlog_21Kgenes.txt",col.names=T,row.names=F,sep="\t",quote=F)
## store variance components
VC<-cbind(colnames(cand_counts)[2:(nm_gene+1)],rep, block,colm,plate,Gvar,GXEvar,Res)
colnames(VC)<-c("GeneID","var_rep","var_block","var_col","var_plate","var_geno","var_gxe","var_residual")
write.table(VC,"variance_components_proportional_trpt_abundance_rlog.txt",col.names=T,row.names=F,sep="\t",quote=F)
################################################################################
## Determine genes with too many zero's using "filter_out_genes_w_many_zeros.R"
## filter out those 1435 genes from BLUPs
################################################################################
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/comb_GeneExp")
rm<-read.table("remove_genes_with_many0s_fromBLUPs.txt",header=T,sep="\t")
count_Blup1<-read.table("hisat_BLUPs_rlog_21Kgenes.txt",header=T,sep="\t")
count_Blup1<-count_Blup1[,-which(colnames(count_Blup1) %in% as.character(rm$geneID))]
#############################################################
###### PEER
#############################################################
reads_cutoff=2
count_Blup<-count_Blup1
count_Blup<-count_Blup[-grep("rep",count_Blup$MLC_STANDARD),] # 312x17281, MO17 not included
rownames(count_Blup)<-count_Blup[,1]
write.table(count_Blup,"hisat_BLUPs_rlog_20019_genes.txt",row.names=F,sep="\t",quote=F)
count_Blup<-count_Blup[,-1]
K_test<-min(round(dim(count_Blup)[1]/4),100)
align<-"hisat"
rep<-"BLUP"
transf<-"rlog"
library(peer)
set.seed(2010)
# build the model
model = PEER()
# run model
PEER_setNk(model,K_test)
# PEER_setNk(model,4)
PEER_setPhenoMean(model,as.matrix(count_Blup))
PEER_update(model) # Converged (var(residuals)) after 206 iterations, on server about 2.5-3 hours
# get precision
precision = PEER_getAlpha(model)
precision = cbind(paste("PEER",1:K_test,sep=""),precision)
colnames(precision)<-c("factor","precision")
setwd("/workdir/ml2498/MaizeLeafCuticle/TWAS_2018/TWAS_v4/PEERfactorSel")
if (reads_cutoff==2){
write.table(precision,paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,".txt",sep=""),col.names=T,row.names=F,sep="\t",quote=F)
} else if (reads_cutoff==1){
write.table(precision,paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,"_1M.txt",sep=""),col.names=T,row.names=F,sep="\t",quote=F)
}
###### plot 1/precision ############################
pdf(paste(transf,"_PEER_precision_rep",rep,".pdf",sep=""))
precision<-read.table(paste(transf,"_",align,"_PEERfactor",K_test,"_precision_rep",rep,".txt",sep=""),header=T,sep="\t")
precision$var<-1/as.numeric(as.character(precision$precision))
precision$factor<-as.numeric(precision$factor)
precision<-precision[order(precision$factor),]
prec_sub<-precision[2:40,]
prec_sub1<-precision[-1,]
plot(precision$factor,precision$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),pch=1,xlab=NULL,ylab="1/Precision")
plot(prec_sub$factor,prec_sub$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),
ylim=c(0,0.05),
pch=19,xlab=NULL,ylab="1/Precision",cex=0.5,lty=1)
plot(prec_sub1$factor,prec_sub1$var,main=paste("Precision of PEER factors: rep ",rep,sep=""),
ylim=c(0,0.04),
pch=19,xlab="Number of PEER factors",ylab="1/Precision",cex=0.5,lty=1)
abline(v=c(20),col="red")
dev.off()
#################################################
K<-20
align<-"hisat"
rep<-"BLUP"
transf<-"rlog"
library(peer)
set.seed(1987)
# build the model
model = PEER()
# run model
PEER_setNk(model,K)
# PEER_setNk(model,4)
PEER_setPhenoMean(model,as.matrix(count_Blup))
PEER_update(model) # Converged (var(residuals)) after 206 iterations, on server about 2.5-3 hours
PEERres = as.data.frame(PEER_getResiduals(model))
colnames(PEERres)<-colnames(count_Blup)
rownames(PEERres)<-rownames(count_Blup)
write.table(PEERres,paste(transf,"_",align,"_PEER",K,"_PEERres_rep",rep,".txt",sep=""),col.names=T,row.names=T,sep="\t",quote=F)
#############################
#############################
|
# Shiny term-project launcher: loads plotting/spatial dependencies and
# starts the app in ./project_rshiny.
library(shiny)
library(sf)
library(dplyr)
library(ggplot2)
library(rsconnect)
# NOTE(review): hard-coded absolute setwd() makes this machine-specific;
# consider runApp() with the app's full path instead.
setwd("C:/Users/chris/Desktop/term project/R 프로젝트/Rshiny")
runApp("project_rshiny")
|
/R-project/Rshiny/project_rshiny/app.R
|
no_license
|
christy4526/KPC_ai_class
|
R
| false
| false
| 179
|
r
|
library(shiny)
library(sf)
library(dplyr)
library(ggplot2)
library(rsconnect)
setwd("C:/Users/chris/Desktop/term project/R 프로젝트/Rshiny")
runApp("project_rshiny")
|
# Run the pagedown package's CRAN-safe test suite (tests/test-cran/) via testit.
testit::test_pkg('pagedown', 'test-cran')
|
/tests/test-cran.R
|
permissive
|
rstudio/pagedown
|
R
| false
| false
| 42
|
r
|
testit::test_pkg('pagedown', 'test-cran')
|
# titanic is available in your workspace
# Check out the structure of titanic
str(titanic)
# Use ggplot() for the first instruction
#install.packages("ggplot2")
library(ggplot2)
# Bar chart: passenger counts per class, bars dodged by sex.
ggplot(titanic,aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge")
# Use ggplot() for the second instruction
# Same chart, one panel per survival outcome.
# NOTE(review): facet_grid() is given the formula as a string here --
# presumably coerced to a formula by ggplot2; confirm for the version used.
ggplot(titanic,aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge") +
facet_grid("~Survived")
# Position jitter (use below)
# Horizontal-only jitter (width 0.5, no vertical jitter) so ages stay exact.
posn.j <- position_jitter(0.5, 0)
# Use ggplot() for the last instruction
# Jittered age-by-class scatter, colored by sex, faceted by survival.
ggplot(titanic,aes(x = factor(Pclass), y =Age, col = factor(Sex))) +
geom_jitter(size = 3, alpha = 0.5, position = posn.j) +
facet_grid("~Survived")
|
/Titanic.R
|
no_license
|
nouraAbuKhamiss/Foundations-of-Data-Science
|
R
| false
| false
| 700
|
r
|
# titanic is avaliable in your workspace
# Check out the structure of titanic
str(titanic)
# Use ggplot() for the first instruction
#install.packages("ggplot2")
library(ggplot2)
ggplot(titanic,aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge")
# Use ggplot() for the second instruction
ggplot(titanic,aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge") +
facet_grid("~Survived")
# Position jitter (use below)
posn.j <- position_jitter(0.5, 0)
# Use ggplot() for the last instruction
ggplot(titanic,aes(x = factor(Pclass), y =Age, col = factor(Sex))) +
geom_jitter(size = 3, alpha = 0.5, position = posn.j) +
facet_grid("~Survived")
|
# Choropleth experiments: encode murder arrest rate (USArrests) and murder
# rate (MurderRate.csv) on US state maps with two double-encoding schemes:
# (1) hue + saturation via precomputed hsv() colors, (2) fill + alpha layers.
library(ggplot2)
library(reshape2)
library(dplyr) # required for arrange()
library(colorspace)
library(viridis)
library(maps) # for the state map data
library(mapproj)

murder_rates <- read.csv(file = 'MurderRate.csv')
murder_rates$state <- tolower(murder_rates$state)

states_map <- map_data("state") # extracts data from the states map
crimes <- data.frame(state = tolower(rownames(USArrests)), USArrests)
crimes <- merge(crimes, murder_rates, by = "state")
crime_map <- merge(states_map, crimes, by.x = "region", by.y = "state")
# Polygon vertices must stay in drawing order within each state group.
crime_map <- arrange(crime_map, group, order)

# Per-vertex fill colors: hue from Murder (arrest rate, rescaled to [0, 1/7]
# so high rates are red), saturation from rate (murder rate, rescaled to
# [0, 1]). Renamed from `c`, which shadowed base::c(); `<-` replaces `=`.
murder_scaled <- (max(crime_map$Murder) - crime_map$Murder) /
  (max(crime_map$Murder) - min(crime_map$Murder))
rate_scaled <- (crime_map$rate - min(crime_map$rate)) /
  (max(crime_map$rate) - min(crime_map$rate))
fill_colors <- hsv(murder_scaled / 7, rate_scaled, .95)

# Hue determined by Murder Arrest Rate and Saturation determined by Murder Rate
ggplot(crime_map, aes(x = long, y = lat, group = group)) +
  coord_map("polyconic") +
  geom_polygon(fill = fill_colors)

# Hue determined by Murder Arrest Rate and Brightness determined by Murder Rate
ggplot(crime_map) +
  coord_map("polyconic") +
  geom_polygon(aes(x = long, y = lat, group = group, fill = Murder)) +
  geom_polygon(aes(x = long, y = lat, group = group, alpha = rate))
|
/Homework/Color/USArrests.R
|
no_license
|
TobyJChappell/CS_710
|
R
| false
| false
| 1,165
|
r
|
# Choropleth experiments: encode murder arrest rate (USArrests) and murder
# rate (MurderRate.csv) on US state maps with two double-encoding schemes:
# (1) hue + saturation via precomputed hsv() colors, (2) fill + alpha layers.
library(ggplot2)
library(reshape2)
library(dplyr) # required for arrange()
library(colorspace)
library(viridis)
library(maps) # for the state map data
library(mapproj)

murder_rates <- read.csv(file = 'MurderRate.csv')
murder_rates$state <- tolower(murder_rates$state)

states_map <- map_data("state") # extracts data from the states map
crimes <- data.frame(state = tolower(rownames(USArrests)), USArrests)
crimes <- merge(crimes, murder_rates, by = "state")
crime_map <- merge(states_map, crimes, by.x = "region", by.y = "state")
# Polygon vertices must stay in drawing order within each state group.
crime_map <- arrange(crime_map, group, order)

# Per-vertex fill colors: hue from Murder (arrest rate, rescaled to [0, 1/7]
# so high rates are red), saturation from rate (murder rate, rescaled to
# [0, 1]). Renamed from `c`, which shadowed base::c(); `<-` replaces `=`.
murder_scaled <- (max(crime_map$Murder) - crime_map$Murder) /
  (max(crime_map$Murder) - min(crime_map$Murder))
rate_scaled <- (crime_map$rate - min(crime_map$rate)) /
  (max(crime_map$rate) - min(crime_map$rate))
fill_colors <- hsv(murder_scaled / 7, rate_scaled, .95)

# Hue determined by Murder Arrest Rate and Saturation determined by Murder Rate
ggplot(crime_map, aes(x = long, y = lat, group = group)) +
  coord_map("polyconic") +
  geom_polygon(fill = fill_colors)

# Hue determined by Murder Arrest Rate and Brightness determined by Murder Rate
ggplot(crime_map) +
  coord_map("polyconic") +
  geom_polygon(aes(x = long, y = lat, group = group, fill = Murder)) +
  geom_polygon(aes(x = long, y = lat, group = group, alpha = rate))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvCovEst.R
\name{cvCovEst}
\alias{cvCovEst}
\title{Cross-Validated Covariance Matrix Estimator Selector}
\usage{
cvCovEst(
dat,
estimators = c(linearShrinkEst, thresholdingEst, sampleCovEst),
estimator_params = list(linearShrinkEst = list(alpha = 0), thresholdingEst =
list(gamma = 0)),
cv_loss = cvMatrixFrobeniusLoss,
cv_scheme = "v_fold",
mc_split = 0.5,
v_folds = 10L,
center = TRUE,
scale = FALSE,
parallel = FALSE
)
}
\arguments{
\item{dat}{A numeric \code{data.frame}, \code{matrix}, or similar object.}
\item{estimators}{A \code{list} of estimator functions to be considered in
the cross-validated estimator selection procedure.}
\item{estimator_params}{A named \code{list} of arguments corresponding to
the hyperparameters of covariance matrix estimators in \code{estimators}.
The name of each list element should match the name of an estimator passed
to \code{estimators}. Each element of the \code{estimator_params} is itself
a named \code{list}, with the names corresponding to a given estimator's
hyperparameter(s). The hyperparameter(s) may be in the form of a single
\code{numeric} or a \code{numeric} vector. If no hyperparameter is needed
for a given estimator, then the estimator need not be listed.}
\item{cv_loss}{A \code{function} indicating the loss function to be used.
This defaults to the Frobenius loss, \code{\link{cvMatrixFrobeniusLoss}()}.
An observation-based version, \code{\link{cvFrobeniusLoss}()}, is also made
available. Additionally, the \code{\link{cvScaledMatrixFrobeniusLoss}()} is
included for situations in which \code{dat}'s variables are of different
scales.}
\item{cv_scheme}{A \code{character} indicating the cross-validation scheme
to be employed. There are two options: (1) V-fold cross-validation, via
\code{"v_folds"}; and (2) Monte Carlo cross-validation, via \code{"mc"}.
Defaults to V-fold cross-validation, per the \code{cv_scheme = "v_fold"} default in the usage above.}
\item{mc_split}{A \code{numeric} between 0 and 1 indicating the proportion
of observations to be included in the validation set of each Monte Carlo
cross-validation fold.}
\item{v_folds}{An \code{integer} larger than or equal to 1 indicating the
number of folds to use for cross-validation. The default is 10, regardless
of the choice of cross-validation scheme.}
\item{center}{A \code{logical} indicating whether to center the columns of
\code{dat} to have mean zero.}
\item{scale}{A \code{logical} indicating whether to scale the columns of
\code{dat} to have unit variance.}
\item{parallel}{A \code{logical} option indicating whether to run the main
cross-validation loop with \code{\link[future.apply]{future_lapply}()}. This
is passed directly to \code{\link[origami]{cross_validate}()}.}
}
\value{
A \code{list} of results containing the following elements:
\itemize{
\item \code{estimate} - A \code{matrix} corresponding to the estimate of
the optimal covariance matrix estimator.
\item \code{estimator} - A \code{character} indicating the optimal
estimator and corresponding hyperparameters, if any.
\item \code{risk_df} - A \code{\link[tibble]{tibble}} providing the
cross-validated risk estimates of each estimator.
\item \code{cv_df} - A \code{\link[tibble]{tibble}} providing each
estimators' loss over the folds of the cross-validated procedure.
\item \code{args} - A named \code{list} containing arguments passed to
\code{cvCovEst}.
}
}
\description{
\code{cvCovEst()} identifies the optimal covariance matrix
estimator from among a set of candidate estimators.
}
\examples{
cvCovEst(
dat = mtcars,
estimators = c(
linearShrinkLWEst, thresholdingEst, sampleCovEst
),
estimator_params = list(
thresholdingEst = list(gamma = seq(0.1, 0.3, 0.1))
),
center = TRUE,
scale = TRUE
)
}
|
/man/cvCovEst.Rd
|
permissive
|
PaplomatasP/cvCovEst
|
R
| false
| true
| 3,791
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvCovEst.R
\name{cvCovEst}
\alias{cvCovEst}
\title{Cross-Validated Covariance Matrix Estimator Selector}
\usage{
cvCovEst(
dat,
estimators = c(linearShrinkEst, thresholdingEst, sampleCovEst),
estimator_params = list(linearShrinkEst = list(alpha = 0), thresholdingEst =
list(gamma = 0)),
cv_loss = cvMatrixFrobeniusLoss,
cv_scheme = "v_fold",
mc_split = 0.5,
v_folds = 10L,
center = TRUE,
scale = FALSE,
parallel = FALSE
)
}
\arguments{
\item{dat}{A numeric \code{data.frame}, \code{matrix}, or similar object.}
\item{estimators}{A \code{list} of estimator functions to be considered in
the cross-validated estimator selection procedure.}
\item{estimator_params}{A named \code{list} of arguments corresponding to
the hyperparameters of covariance matrix estimators in \code{estimators}.
The name of each list element should match the name of an estimator passed
to \code{estimators}. Each element of the \code{estimator_params} is itself
a named \code{list}, with the names corresponding to a given estimator's
hyperparameter(s). The hyperparameter(s) may be in the form of a single
\code{numeric} or a \code{numeric} vector. If no hyperparameter is needed
for a given estimator, then the estimator need not be listed.}
\item{cv_loss}{A \code{function} indicating the loss function to be used.
This defaults to the Frobenius loss, \code{\link{cvMatrixFrobeniusLoss}()}.
An observation-based version, \code{\link{cvFrobeniusLoss}()}, is also made
available. Additionally, the \code{\link{cvScaledMatrixFrobeniusLoss}()} is
included for situations in which \code{dat}'s variables are of different
scales.}
\item{cv_scheme}{A \code{character} indicating the cross-validation scheme
to be employed. There are two options: (1) V-fold cross-validation, via
\code{"v_folds"}; and (2) Monte Carlo cross-validation, via \code{"mc"}.
Defaults to V-fold cross-validation, per the \code{cv_scheme = "v_fold"} default in the usage above.}
\item{mc_split}{A \code{numeric} between 0 and 1 indicating the proportion
of observations to be included in the validation set of each Monte Carlo
cross-validation fold.}
\item{v_folds}{An \code{integer} larger than or equal to 1 indicating the
number of folds to use for cross-validation. The default is 10, regardless
of the choice of cross-validation scheme.}
\item{center}{A \code{logical} indicating whether to center the columns of
\code{dat} to have mean zero.}
\item{scale}{A \code{logical} indicating whether to scale the columns of
\code{dat} to have unit variance.}
\item{parallel}{A \code{logical} option indicating whether to run the main
cross-validation loop with \code{\link[future.apply]{future_lapply}()}. This
is passed directly to \code{\link[origami]{cross_validate}()}.}
}
\value{
A \code{list} of results containing the following elements:
\itemize{
\item \code{estimate} - A \code{matrix} corresponding to the estimate of
the optimal covariance matrix estimator.
\item \code{estimator} - A \code{character} indicating the optimal
estimator and corresponding hyperparameters, if any.
\item \code{risk_df} - A \code{\link[tibble]{tibble}} providing the
cross-validated risk estimates of each estimator.
\item \code{cv_df} - A \code{\link[tibble]{tibble}} providing each
estimators' loss over the folds of the cross-validated procedure.
\item \code{args} - A named \code{list} containing arguments passed to
\code{cvCovEst}.
}
}
\description{
\code{cvCovEst()} identifies the optimal covariance matrix
estimator from among a set of candidate estimators.
}
\examples{
cvCovEst(
dat = mtcars,
estimators = c(
linearShrinkLWEst, thresholdingEst, sampleCovEst
),
estimator_params = list(
thresholdingEst = list(gamma = seq(0.1, 0.3, 0.1))
),
center = TRUE,
scale = TRUE
)
}
|
# EDA-CLU-Distribution_Based.R
#
# Purpose: A Bioinformatics Course:
#          R code accompanying the EDA-CLU-Distribution Based unit.
#
# Version: 0.1
#
# Date:    2017 10 14
# Author:  Marcus Chiam
#
# Versions:
#   0.1 Learning unit for the EDA-CLU-Distribution_Based section.
#
# TODO:
# 1. Install packages
# 2. Generate synthetic data
# 3. Perform clustering on synthetic data
# 4. Compare clustering results with synthetic data clusters
# 5. Get data from myGOExSet.RData
# 6. Clean the data
# 7. Perform clustering on the data
# 8. Observe results of clustering
# 9. Plot the data for visualization
# 10. Revisiting our methods and thinking of alternatives

# = 1. Install packages =========================================================================
# we will be using mlbench to generate synthetic data
if (!require(mlbench, quietly = TRUE)) {
  install.packages("mlbench")
  library(mlbench)
}
# we will be using the Mclust function in the mclust package for clustering
if (!require(mclust, quietly = TRUE)) {
  install.packages("mclust")
  library(mclust)
}
# we will be using a plotting function from factoextra to plot our data
if (!require(factoextra, quietly = TRUE)) {
  install.packages("factoextra")
  library(factoextra)
}
# we will be using a data imputation function from the mix package
# to deal with NA values in our gene expression data set
if (!require(mix, quietly = TRUE)) {
  install.packages("mix")
  library(mix)
}
# we will use 3dplot as an alternative plotting tool
if (!require(rgl, quietly = TRUE)) {
  install.packages("rgl")
  library(rgl)
}

# = 2. Generate synthetic data ==================================================================
set.seed(100)
# we will first start by confirming our clustering algorithm functions properly
# by using it on synthetic data and comparing its results with the synthetic data clusters
# the function mlbench.2dnormals(n, cl=2, r=sqrt(cl), sd=1) generates n normally distributed
# 2D data points. The parameter cl refers to the number of classes (or clusters)
# in the data set. More information on the parameters can be found here:
# https://www.rdocumentation.org/packages/mlbench/versions/2.1-1/topics/mlbench.2dnormals
# we will use mlbench.2dnormals to generate 100 2D data points that are grouped into one of 2 classes (clusters)
synthetic_data <- mlbench.2dnormals(100, 2)
# synthetic_data$x is a matrix that shows the data points that were generated
head(synthetic_data$x)
# synthetic_data$classes is a vector that shows what cluster number each data point is assigned to
head(synthetic_data$classes)
# plot the points
plot(synthetic_data$x)
# plot the points with the synthetic cluster groupings
plot(synthetic_data)

# = 3. Perform clustering on synthetic data ======================================================
# Now that we have the generated data points and their associated cluster assignments,
# let's use the Mclust function to apply distribution-based clustering on the data points.
# Then we can compare how effective our Mclust function is at clustering by comparing its
# clustering assignment results with the clustering assignments from mlbench.2dnormals
# model-based-clustering using the Mclust function
# type ?Mclust for more details about the function outputs
clustered_synthetic_data <- Mclust(synthetic_data$x)

# = 4. Compare clustering results with synthetic data clusters ===================================
# After we pass our model through the Mclust function, let's take a look at the results
# the $G attribute shows the number of clusters that Mclust decided was optimal for the data set
clustered_synthetic_data$G
# result is 2, which is the same as mlbench.2dnormals; so far so good
# plot the points with the cluster groupings
plot(clustered_synthetic_data, "classification")
# compare the synthetic data clusters, with the clusters generated from the Mclust function
# cycle between the two plots below:
# synthetic clusters
plot(synthetic_data)
# Mclust clusters
plot(clustered_synthetic_data, "classification")
# the cluster groupings look pretty similar!
# to look into more detail, we can also check the cluster assignments for each individual data point
# synthetic cluster assignments
synthetic_data$classes
# Mclust cluster assignments
clustered_synthetic_data$classification
# The two vectors are quite similar!
# To quantify this similarity, we can see what percentage of data points have the same assignments
# between the two vectors.
# We make a logical vector that contains a TRUE value for each data point that has
# the same clustering assignment in both vectors, and FALSE otherwise.
# Taking mean() of that logical vector gives the proportion of agreement; unlike
# summing and dividing by a hard-coded 100, this generalises to any number of points.
mean(synthetic_data$classes == clustered_synthetic_data$classification)
# 92% similarity...not bad!
# keep in mind, the cluster number is arbitrary, so there may be cases where the synthetic clustering
# assigned a cluster as "1", but the Mclust cluster assignment assigned that same cluster as "2"
# in which case, you would expect to see the majority of data points from the synthetic cluster "1",
# being assigned to cluster "2" in the Mclust classification
# EXAMPLE: synthetic cluster assignment vector --> 1 1 1 2 1 2
#          Mclust cluster assignment vector   --> 2 2 2 1 2 1
# in the above example, the cluster number assignments are different between the two vectors
# but the important thing is that the data points are grouped the same way in both vectors,
# just that the clusters have different number labels
# before we move to real data, I would encourage you to play around more with mlbench
# and try generating different data sets, perhaps with a different number of data points
# and a different number of classification/clusters

# = 5. Get data from myGOExSet.RData ============================================================
# hopefully the exercise above will convince you that the Mclust function works properly
# now let's move onto real data
# import the myGOExSet data set
data(myGOExSet)
head(myGOExSet, 3)

# = 6. Clean the data ===========================================================================
# exclude character columns; only keep the numeric columns denoting time points
df <- myGOExSet[, -1:-4]
head(df)
# we need to resolve any NA values, since Mclust will raise an error if we have any NA values
# impute data for NA values
df_no_na <- imputeData(df)
# alternatively we can omit NA values (this will remove the entire gene row) by typing:
# df_no_na <- na.omit(df)

# = 7. Perform clustering on the data ============================================================
# model-based-clustering using the Mclust function
# type ?Mclust for more details about the function outputs
mc <- Mclust(df_no_na)

# = 8. Observe results of clustering =============================================================
# mc$G shows the optimal number of clusters
mc$G
# mc$z is a matrix showing the probability of each gene belonging to a particular cluster
head(mc$z, 30)
# mc$classification shows the cluster assignment of each gene
head(mc$classification, 30)

# = 9. Plot the data for visualization ===========================================================
# Since we are clustering all points in a time-series data set,
# each time point is treated as its own dimension.
# Therefore with 13 different time points, we are working with 13-dimensional data.
# We can confirm this by typing mc$d which tells us how many dimensions we are working with
mc$d
# Unfortunately, there isn't one good and effective way to visualize high dimensional data
# On a 2d plot, we can try to plot on only two dimensions and see how they cluster
fviz_cluster(mc, axes = c(1, 2), pointsize = 1.0, labelsize = 0)
# Each data point is color-coded to show which cluster it belongs to
# we can manipulate which dimensions to show with axes parameter; it takes a vector of length 2
# c(1,2) shows dimensions 1 and 2 on the plot
# lets take a look dimensions 3 and 4
fviz_cluster(mc, axes = c(3, 4), pointsize = 1.0, labelsize = 0)
# still, even with this, it is quite hard to visualize our data
# Another alternative is to plot in 3 dimensions, with each dimension being one cluster group.
# We are fortunate (this time) to have our data be clustered into only 3 groups.
# By plotting in this way, data points that are more likely to belong to a certain cluster,
# will be represented by how far along they are on the axis of that cluster group
mc$z
# a reminder that the $z attribute is a matrix showing the probability of
# each gene belonging to a particular cluster
# now plot it in 3D
plot3d(mc$z)
# seems that very few genes belong to cluster 3
# this can be confirmed by trying:
length(which(mc$classification == 3))
# only 8 out of the 281 genes are clustered into group 3
# it seems our Mclust function may not be performing so well...
# or perhaps it's due to the data itself?

# = 10. Revisiting our methods and thinking of alternatives ===========================================================
# Let's look at our cluster assignments again
mc$classification
# data points belonging to the same cluster may have some kind of meaning in context
# to the data set we are working with.
# Perhaps having similar gene expression profiles (with regards to the time-series) may help
# elucidate their function.
# Although it is very difficult to draw conclusions, seeing as we're working with so many dimensions.
# It may be better to try clustering only between two time points, rather than 12
# remove all other time points except the first two columns
two_dimensional_data <- df_no_na[, -3:-13]
# cluster
mc_tdd <- Mclust(two_dimensional_data)
# sanity check, to make sure we only have two dimensions
mc_tdd$d
mc_tdd$G
# 3 clusters generated again...hm...
# let's look at the classifications
mc_tdd$classification
# and plot
plot(mc_tdd, "classification")
# Whether this plot has more "meaning" than the previous one, is hard to say,
# However, the context is easier to interpret:
# genes that are clustered in the same group, must have similar gene expression profiles
# between the two dimensions we had in our data, i.e. the time points t0 and t10.
# Arguably, this result is easier to interpret as well as being easier to make inferences
# to potential applications of this result.
# Although, if we wanted to be thorough, we would have to cluster each pair of dimensions
# (t0 with everything, t1 with everything, ..., t120 with everything) which would take a
# lot of time.
# But this result, I believe, is more substantial and the time invested will be worthwhile
|
/EDA-CLU-Distribution_Based.R
|
no_license
|
hyginn/BCB410-DataScience
|
R
| false
| false
| 10,680
|
r
|
# EDA-CLU-Distribution_Based.R
#
# Purpose: A Bioinformatics Course:
#          R code accompanying the EDA-CLU-Distribution Based unit.
#
# Version: 0.1
#
# Date:    2017 10 14
# Author:  Marcus Chiam
#
# Versions:
#   0.1 Learning unit for the EDA-CLU-Distribution_Based section.
#
# TODO:
# 1. Install packages
# 2. Generate synthetic data
# 3. Perform clustering on synthetic data
# 4. Compare clustering results with synthetic data clusters
# 5. Get data from myGOExSet.RData
# 6. Clean the data
# 7. Perform clustering on the data
# 8. Observe results of clustering
# 9. Plot the data for visualization
# 10. Revisiting our methods and thinking of alternatives

# = 1. Install packages =========================================================================
# we will be using mlbench to generate synthetic data
if (!require(mlbench, quietly = TRUE)) {
  install.packages("mlbench")
  library(mlbench)
}
# we will be using the Mclust function in the mclust package for clustering
if (!require(mclust, quietly = TRUE)) {
  install.packages("mclust")
  library(mclust)
}
# we will be using a plotting function from factoextra to plot our data
if (!require(factoextra, quietly = TRUE)) {
  install.packages("factoextra")
  library(factoextra)
}
# we will be using a data imputation function from the mix package
# to deal with NA values in our gene expression data set
if (!require(mix, quietly = TRUE)) {
  install.packages("mix")
  library(mix)
}
# we will use 3dplot as an alternative plotting tool
if (!require(rgl, quietly = TRUE)) {
  install.packages("rgl")
  library(rgl)
}

# = 2. Generate synthetic data ==================================================================
set.seed(100)
# we will first start by confirming our clustering algorithm functions properly
# by using it on synthetic data and comparing its results with the synthetic data clusters
# the function mlbench.2dnormals(n, cl=2, r=sqrt(cl), sd=1) generates n normally distributed
# 2D data points. The parameter cl refers to the number of classes (or clusters)
# in the data set. More information on the parameters can be found here:
# https://www.rdocumentation.org/packages/mlbench/versions/2.1-1/topics/mlbench.2dnormals
# we will use mlbench.2dnormals to generate 100 2D data points that are grouped into one of 2 classes (clusters)
synthetic_data <- mlbench.2dnormals(100, 2)
# synthetic_data$x is a matrix that shows the data points that were generated
head(synthetic_data$x)
# synthetic_data$classes is a vector that shows what cluster number each data point is assigned to
head(synthetic_data$classes)
# plot the points
plot(synthetic_data$x)
# plot the points with the synthetic cluster groupings
plot(synthetic_data)

# = 3. Perform clustering on synthetic data ======================================================
# Now that we have the generated data points and their associated cluster assignments,
# let's use the Mclust function to apply distribution-based clustering on the data points.
# Then we can compare how effective our Mclust function is at clustering by comparing its
# clustering assignment results with the clustering assignments from mlbench.2dnormals
# model-based-clustering using the Mclust function
# type ?Mclust for more details about the function outputs
clustered_synthetic_data <- Mclust(synthetic_data$x)

# = 4. Compare clustering results with synthetic data clusters ===================================
# After we pass our model through the Mclust function, let's take a look at the results
# the $G attribute shows the number of clusters that Mclust decided was optimal for the data set
clustered_synthetic_data$G
# result is 2, which is the same as mlbench.2dnormals; so far so good
# plot the points with the cluster groupings
plot(clustered_synthetic_data, "classification")
# compare the synthetic data clusters, with the clusters generated from the Mclust function
# cycle between the two plots below:
# synthetic clusters
plot(synthetic_data)
# Mclust clusters
plot(clustered_synthetic_data, "classification")
# the cluster groupings look pretty similar!
# to look into more detail, we can also check the cluster assignments for each individual data point
# synthetic cluster assignments
synthetic_data$classes
# Mclust cluster assignments
clustered_synthetic_data$classification
# The two vectors are quite similar!
# To quantify this similarity, we can see what percentage of data points have the same assignments
# between the two vectors.
# We make a logical vector that contains a TRUE value for each data point that has
# the same clustering assignment in both vectors, and FALSE otherwise.
# Taking mean() of that logical vector gives the proportion of agreement; unlike
# summing and dividing by a hard-coded 100, this generalises to any number of points.
mean(synthetic_data$classes == clustered_synthetic_data$classification)
# 92% similarity...not bad!
# keep in mind, the cluster number is arbitrary, so there may be cases where the synthetic clustering
# assigned a cluster as "1", but the Mclust cluster assignment assigned that same cluster as "2"
# in which case, you would expect to see the majority of data points from the synthetic cluster "1",
# being assigned to cluster "2" in the Mclust classification
# EXAMPLE: synthetic cluster assignment vector --> 1 1 1 2 1 2
#          Mclust cluster assignment vector   --> 2 2 2 1 2 1
# in the above example, the cluster number assignments are different between the two vectors
# but the important thing is that the data points are grouped the same way in both vectors,
# just that the clusters have different number labels
# before we move to real data, I would encourage you to play around more with mlbench
# and try generating different data sets, perhaps with a different number of data points
# and a different number of classification/clusters

# = 5. Get data from myGOExSet.RData ============================================================
# hopefully the exercise above will convince you that the Mclust function works properly
# now let's move onto real data
# import the myGOExSet data set
data(myGOExSet)
head(myGOExSet, 3)

# = 6. Clean the data ===========================================================================
# exclude character columns; only keep the numeric columns denoting time points
df <- myGOExSet[, -1:-4]
head(df)
# we need to resolve any NA values, since Mclust will raise an error if we have any NA values
# impute data for NA values
df_no_na <- imputeData(df)
# alternatively we can omit NA values (this will remove the entire gene row) by typing:
# df_no_na <- na.omit(df)

# = 7. Perform clustering on the data ============================================================
# model-based-clustering using the Mclust function
# type ?Mclust for more details about the function outputs
mc <- Mclust(df_no_na)

# = 8. Observe results of clustering =============================================================
# mc$G shows the optimal number of clusters
mc$G
# mc$z is a matrix showing the probability of each gene belonging to a particular cluster
head(mc$z, 30)
# mc$classification shows the cluster assignment of each gene
head(mc$classification, 30)

# = 9. Plot the data for visualization ===========================================================
# Since we are clustering all points in a time-series data set,
# each time point is treated as its own dimension.
# Therefore with 13 different time points, we are working with 13-dimensional data.
# We can confirm this by typing mc$d which tells us how many dimensions we are working with
mc$d
# Unfortunately, there isn't one good and effective way to visualize high dimensional data
# On a 2d plot, we can try to plot on only two dimensions and see how they cluster
fviz_cluster(mc, axes = c(1, 2), pointsize = 1.0, labelsize = 0)
# Each data point is color-coded to show which cluster it belongs to
# we can manipulate which dimensions to show with axes parameter; it takes a vector of length 2
# c(1,2) shows dimensions 1 and 2 on the plot
# lets take a look dimensions 3 and 4
fviz_cluster(mc, axes = c(3, 4), pointsize = 1.0, labelsize = 0)
# still, even with this, it is quite hard to visualize our data
# Another alternative is to plot in 3 dimensions, with each dimension being one cluster group.
# We are fortunate (this time) to have our data be clustered into only 3 groups.
# By plotting in this way, data points that are more likely to belong to a certain cluster,
# will be represented by how far along they are on the axis of that cluster group
mc$z
# a reminder that the $z attribute is a matrix showing the probability of
# each gene belonging to a particular cluster
# now plot it in 3D
plot3d(mc$z)
# seems that very few genes belong to cluster 3
# this can be confirmed by trying:
length(which(mc$classification == 3))
# only 8 out of the 281 genes are clustered into group 3
# it seems our Mclust function may not be performing so well...
# or perhaps it's due to the data itself?

# = 10. Revisiting our methods and thinking of alternatives ===========================================================
# Let's look at our cluster assignments again
mc$classification
# data points belonging to the same cluster may have some kind of meaning in context
# to the data set we are working with.
# Perhaps having similar gene expression profiles (with regards to the time-series) may help
# elucidate their function.
# Although it is very difficult to draw conclusions, seeing as we're working with so many dimensions.
# It may be better to try clustering only between two time points, rather than 12
# remove all other time points except the first two columns
two_dimensional_data <- df_no_na[, -3:-13]
# cluster
mc_tdd <- Mclust(two_dimensional_data)
# sanity check, to make sure we only have two dimensions
mc_tdd$d
mc_tdd$G
# 3 clusters generated again...hm...
# let's look at the classifications
mc_tdd$classification
# and plot
plot(mc_tdd, "classification")
# Whether this plot has more "meaning" than the previous one, is hard to say,
# However, the context is easier to interpret:
# genes that are clustered in the same group, must have similar gene expression profiles
# between the two dimensions we had in our data, i.e. the time points t0 and t10.
# Arguably, this result is easier to interpret as well as being easier to make inferences
# to potential applications of this result.
# Although, if we wanted to be thorough, we would have to cluster each pair of dimensions
# (t0 with everything, t1 with everything, ..., t120 with everything) which would take a
# lot of time.
# But this result, I believe, is more substantial and the time invested will be worthwhile
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.